| author | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2013-01-15 15:58:25 -0500 |
| --- | --- | --- |
| committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2013-01-15 15:58:25 -0500 |
| commit | 7bcc1ec07748cae3552dc9b46701c117926c8923 (patch) | |
| tree | 2b3edc7de77ca306b2559ae341077094bac8c4a2 /mm/shmem.c | |
| parent | e5c702d3b268066dc70d619ecff06a08065f343f (diff) | |
| parent | 29594404d7fe73cd80eaa4ee8c43dcc53970c60e (diff) | |
Merge tag 'v3.7' into stable/for-linus-3.8
Linux 3.7
* tag 'v3.7': (833 commits)
Linux 3.7
Input: matrix-keymap - provide proper module license
Revert "revert "Revert "mm: remove __GFP_NO_KSWAPD""" and associated damage
ipv4: ip_check_defrag must not modify skb before unsharing
Revert "mm: avoid waking kswapd for THP allocations when compaction is deferred or contended"
inet_diag: validate port comparison byte code to prevent unsafe reads
inet_diag: avoid unsafe and nonsensical prefix matches in inet_diag_bc_run()
inet_diag: validate byte code to prevent oops in inet_diag_bc_run()
inet_diag: fix oops for IPv4 AF_INET6 TCP SYN-RECV state
mm: vmscan: fix inappropriate zone congestion clearing
vfs: fix O_DIRECT read past end of block device
net: gro: fix possible panic in skb_gro_receive()
tcp: bug fix Fast Open client retransmission
tmpfs: fix shared mempolicy leak
mm: vmscan: do not keep kswapd looping forever due to individual uncompactable zones
mm: compaction: validate pfn range passed to isolate_freepages_block
mmc: sh-mmcif: avoid oops on spurious interrupts (second try)
Revert misapplied "mmc: sh-mmcif: avoid oops on spurious interrupts"
mmc: sdhci-s3c: fix missing clock for gpio card-detect
lib/Makefile: Fix oid_registry build dependency
...
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Conflicts:
arch/arm/xen/enlighten.c
drivers/xen/Makefile
[We need to have the v3.7 base as the 'for-3.8' was based off v3.7-rc3
and there are some patches in v3.7-rc6 that we need to have in our branch]
Diffstat (limited to 'mm/shmem.c')
| -rw-r--r-- | mm/shmem.c | 44 |

1 file changed, 31 insertions, 13 deletions
```diff
diff --git a/mm/shmem.c b/mm/shmem.c
index 67afba5117f2..50c5b8f3a359 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -643,7 +643,7 @@ static void shmem_evict_inode(struct inode *inode)
 		kfree(info->symlink);
 
 	simple_xattrs_free(&info->xattrs);
-	BUG_ON(inode->i_blocks);
+	WARN_ON(inode->i_blocks);
 	shmem_free_inode(inode->i_sb);
 	clear_inode(inode);
 }
@@ -910,25 +910,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 			struct shmem_inode_info *info, pgoff_t index)
 {
-	struct mempolicy mpol, *spol;
 	struct vm_area_struct pvma;
-
-	spol = mpol_cond_copy(&mpol,
-			mpol_shared_policy_lookup(&info->policy, index));
+	struct page *page;
 
 	/* Create a pseudo vma that just contains the policy */
 	pvma.vm_start = 0;
 	/* Bias interleave by inode number to distribute better across nodes */
 	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
 	pvma.vm_ops = NULL;
-	pvma.vm_policy = spol;
-	return swapin_readahead(swap, gfp, &pvma, 0);
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+
+	page = swapin_readahead(swap, gfp, &pvma, 0);
+
+	/* Drop reference taken by mpol_shared_policy_lookup() */
+	mpol_cond_put(pvma.vm_policy);
+
+	return page;
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
 			struct shmem_inode_info *info, pgoff_t index)
 {
 	struct vm_area_struct pvma;
+	struct page *page;
 
 	/* Create a pseudo vma that just contains the policy */
 	pvma.vm_start = 0;
@@ -937,10 +941,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
-	/*
-	 * alloc_page_vma() will drop the shared policy reference
-	 */
-	return alloc_page_vma(gfp, &pvma, 0);
+	page = alloc_page_vma(gfp, &pvma, 0);
+
+	/* Drop reference taken by mpol_shared_policy_lookup() */
+	mpol_cond_put(pvma.vm_policy);
+
+	return page;
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
@@ -1145,8 +1151,20 @@ repeat:
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 						gfp, swp_to_radix_entry(swap));
-			/* We already confirmed swap, and make no allocation */
-			VM_BUG_ON(error);
+			/*
+			 * We already confirmed swap under page lock, and make
+			 * no memory allocation here, so usually no possibility
+			 * of error; but free_swap_and_cache() only trylocks a
+			 * page, so it is just possible that the entry has been
+			 * truncated or holepunched since swap was confirmed.
+			 * shmem_undo_range() will have done some of the
+			 * unaccounting, now delete_from_swap_cache() will do
+			 * the rest (including mem_cgroup_uncharge_swapcache).
+			 * Reset swap.val? No, leave it so "failed" goes back to
+			 * "repeat": reading a hole and writing should succeed.
+			 */
+			if (error)
+				delete_from_swap_cache(page);
 		}
 		if (error)
 			goto failed;
```
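For orientation only, here is a minimal userspace sketch of the reference-handling pattern the mempolicy hunks above move to: the lookup helper hands back a policy that may carry a reference, and the caller itself drops that reference with a conditional put after the allocation call, rather than relying on the allocation path to do it. All identifiers below (sketch_policy, sketch_lookup, sketch_cond_put, sketch_alloc) are hypothetical stand-ins, not the kernel's mempolicy API; the real calls are mpol_shared_policy_lookup() and mpol_cond_put() as shown in the diff.

```c
/*
 * Userspace sketch (not kernel code) of the lookup/use/conditional-put
 * ordering adopted in shmem_swapin() and shmem_alloc_page() above.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_policy {
	int refcnt;		/* counted only for shared policies */
	bool shared;
};

static struct sketch_policy shared_pol = { .refcnt = 1, .shared = true };
static struct sketch_policy default_pol = { .shared = false };

/* Lookup: takes a reference when it hands back a shared policy. */
static struct sketch_policy *sketch_lookup(bool have_shared)
{
	if (!have_shared)
		return &default_pol;	/* no reference taken */
	shared_pol.refcnt++;
	return &shared_pol;
}

/* Conditional put: only shared (counted) policies are dropped. */
static void sketch_cond_put(struct sketch_policy *pol)
{
	if (pol && pol->shared)
		pol->refcnt--;
}

/* Caller mirrors the fixed shape: lookup, use, then drop the reference. */
static void sketch_alloc(bool have_shared)
{
	struct sketch_policy *pol = sketch_lookup(have_shared);

	/* ... an allocation that consults 'pol' would go here ... */

	/* Drop the reference taken by sketch_lookup(), on every path. */
	sketch_cond_put(pol);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		sketch_alloc(true);
	sketch_alloc(false);

	/* With the put in the caller, the shared policy's count is balanced. */
	assert(shared_pol.refcnt == 1);
	printf("shared policy refcnt balanced: %d\n", shared_pol.refcnt);
	return 0;
}
```

Built with `cc -std=c99 -Wall sketch.c`, the assertion passes because every lookup that takes a reference is paired with a conditional put in the same caller; that pairing is exactly what the diff adds, replacing the earlier arrangement in which the allocation helper (or a stack copy of the policy) was expected to drop the reference.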