From 0a683794ace283984ae95ea6796f37b5f3afc446 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Tue, 1 Jun 2021 13:40:02 +1000
Subject: xfs: split up xfs_buf_allocate_memory

Based on a patch from Christoph Hellwig.

This splits out the heap allocation and page allocation portions of
the buffer memory allocation into two separate helper functions.

Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 126 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 74 insertions(+), 52 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 592800c8852f..2e35d344a69b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -347,65 +347,55 @@ xfs_buf_free(
         kmem_cache_free(xfs_buf_zone, bp);
 }

-/*
- * Allocates all the pages for buffer in question and builds it's page list.
- */
-STATIC int
-xfs_buf_allocate_memory(
-        struct xfs_buf *bp,
-        uint flags)
+static int
+xfs_buf_alloc_kmem(
+        struct xfs_buf *bp,
+        size_t size,
+        xfs_buf_flags_t flags)
 {
-        size_t size;
-        size_t nbytes, offset;
-        gfp_t gfp_mask = xb_to_gfp(flags);
-        unsigned short page_count, i;
-        xfs_off_t start, end;
-        int error;
-        xfs_km_flags_t kmflag_mask = 0;
+        int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
+        xfs_km_flags_t kmflag_mask = KM_NOFS;

-        /*
-         * assure zeroed buffer for non-read cases.
-         */
-        if (!(flags & XBF_READ)) {
+        /* Assure zeroed buffer for non-read cases. */
+        if (!(flags & XBF_READ))
                 kmflag_mask |= KM_ZERO;
-                gfp_mask |= __GFP_ZERO;
-        }

-        /*
-         * for buffers that are contained within a single page, just allocate
-         * the memory from the heap - there's no need for the complexity of
-         * page arrays to keep allocation down to order 0.
-         */
-        size = BBTOB(bp->b_length);
-        if (size < PAGE_SIZE) {
-                int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-                bp->b_addr = kmem_alloc_io(size, align_mask,
-                                           KM_NOFS | kmflag_mask);
-                if (!bp->b_addr) {
-                        /* low memory - use alloc_page loop instead */
-                        goto use_alloc_page;
-                }
+        bp->b_addr = kmem_alloc_io(size, align_mask, kmflag_mask);
+        if (!bp->b_addr)
+                return -ENOMEM;

-                if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
-                    ((unsigned long)bp->b_addr & PAGE_MASK)) {
-                        /* b_addr spans two pages - use alloc_page instead */
-                        kmem_free(bp->b_addr);
-                        bp->b_addr = NULL;
-                        goto use_alloc_page;
-                }
-                bp->b_offset = offset_in_page(bp->b_addr);
-                bp->b_pages = bp->b_page_array;
-                bp->b_pages[0] = kmem_to_page(bp->b_addr);
-                bp->b_page_count = 1;
-                bp->b_flags |= _XBF_KMEM;
-                return 0;
+        if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
+            ((unsigned long)bp->b_addr & PAGE_MASK)) {
+                /* b_addr spans two pages - use alloc_page instead */
+                kmem_free(bp->b_addr);
+                bp->b_addr = NULL;
+                return -ENOMEM;
         }
+        bp->b_offset = offset_in_page(bp->b_addr);
+        bp->b_pages = bp->b_page_array;
+        bp->b_pages[0] = kmem_to_page(bp->b_addr);
+        bp->b_page_count = 1;
+        bp->b_flags |= _XBF_KMEM;
+        return 0;
+}
+
+static int
+xfs_buf_alloc_pages(
+        struct xfs_buf *bp,
+        uint page_count,
+        xfs_buf_flags_t flags)
+{
+        gfp_t gfp_mask = xb_to_gfp(flags);
+        size_t size;
+        size_t offset;
+        size_t nbytes;
+        int i;
+        int error;
+
+        /* Assure zeroed buffer for non-read cases. */
+        if (!(flags & XBF_READ))
+                gfp_mask |= __GFP_ZERO;

-use_alloc_page:
-        start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
-        end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
-                                                        >> PAGE_SHIFT;
-        page_count = end - start;
         error = _xfs_buf_get_pages(bp, page_count);
         if (unlikely(error))
                 return error;
@@ -458,6 +448,38 @@ out_free_pages:
         return error;
 }

+
+/*
+ * Allocates all the pages for buffer in question and builds it's page list.
+ */
+static int
+xfs_buf_allocate_memory(
+        struct xfs_buf *bp,
+        uint flags)
+{
+        size_t size;
+        xfs_off_t start, end;
+        int error;
+
+        /*
+         * For buffers that fit entirely within a single page, first attempt to
+         * allocate the memory from the heap to minimise memory usage. If we
+         * can't get heap memory for these small buffers, we fall back to using
+         * the page allocator.
+         */
+        size = BBTOB(bp->b_length);
+        if (size < PAGE_SIZE) {
+                error = xfs_buf_alloc_kmem(bp, size, flags);
+                if (!error)
+                        return 0;
+        }
+
+        start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
+        end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
+                                                        >> PAGE_SHIFT;
+        return xfs_buf_alloc_pages(bp, end - start, flags);
+}
+
 /*
  * Map buffer into kernel address-space if necessary.
  */
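
The page-straddling test in xfs_buf_alloc_kmem() above is the subtle part of
this split: a heap allocation can only stand in for a single page of buffer
memory if it does not cross a page boundary. A minimal userspace sketch of
that check, assuming a hardcoded 4 KiB page size (illustrative C, not kernel
code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Return 1 if [addr, addr + size) sits entirely within one page. */
static int fits_in_one_page(void *addr, size_t size)
{
        uintptr_t first = (uintptr_t)addr & PAGE_MASK;
        uintptr_t last = ((uintptr_t)addr + size - 1) & PAGE_MASK;

        return first == last;
}

int main(void)
{
        size_t size = 512;
        void *p = malloc(size);

        if (!p)
                return 1;
        printf("%p %s\n", p, fits_in_one_page(p, size) ?
                        "fits in one page" : "straddles a page boundary");
        free(p);
        return 0;
}

When the check fails, the kernel code above frees the heap memory and returns
-ENOMEM so the caller falls back to the page allocator rather than retrying
the heap allocation.
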
From 07b5c5add42a0afccf79401b12d78043ed6b8240 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Tue, 1 Jun 2021 13:40:35 +1000
Subject: xfs: use xfs_buf_alloc_pages for uncached buffers

Use the newly factored out page allocation code. This adds automatic
buffer zeroing for non-read uncached buffers.

This also allows us to greatly simplify the error handling in
xfs_buf_get_uncached(). Because xfs_buf_alloc_pages() cleans up
partial allocation failure, we can just call xfs_buf_free() in all
error cases now to clean up after failures.

Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/libxfs/xfs_ag.c |  1 -
 fs/xfs/xfs_buf.c       | 27 ++++++---------------------
 2 files changed, 6 insertions(+), 22 deletions(-)

diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index c68a36688474..be0087825ae0 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -43,7 +43,6 @@ xfs_get_aghdr_buf(
         if (error)
                 return error;

-        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
         bp->b_bn = blkno;
         bp->b_maps[0].bm_bn = blkno;
         bp->b_ops = ops;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2e35d344a69b..b1610115d401 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -973,7 +973,7 @@ xfs_buf_get_uncached(
         struct xfs_buf **bpp)
 {
         unsigned long page_count;
-        int error, i;
+        int error;
         struct xfs_buf *bp;
         DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

@@ -982,41 +982,26 @@ xfs_buf_get_uncached(
         /* flags might contain irrelevant bits, pass only what we care about */
         error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
         if (error)
-                goto fail;
+                return error;

         page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
-        error = _xfs_buf_get_pages(bp, page_count);
+        error = xfs_buf_alloc_pages(bp, page_count, flags);
         if (error)
                 goto fail_free_buf;

-        for (i = 0; i < page_count; i++) {
-                bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
-                if (!bp->b_pages[i]) {
-                        error = -ENOMEM;
-                        goto fail_free_mem;
-                }
-        }
-        bp->b_flags |= _XBF_PAGES;
-
         error = _xfs_buf_map_pages(bp, 0);
         if (unlikely(error)) {
                 xfs_warn(target->bt_mount,
                         "%s: failed to map pages", __func__);
-                goto fail_free_mem;
+                goto fail_free_buf;
         }

         trace_xfs_buf_get_uncached(bp, _RET_IP_);
         *bpp = bp;
         return 0;

- fail_free_mem:
-        while (--i >= 0)
-                __free_page(bp->b_pages[i]);
-        _xfs_buf_free_pages(bp);
- fail_free_buf:
-        xfs_buf_free_maps(bp);
-        kmem_cache_free(xfs_buf_zone, bp);
- fail:
+fail_free_buf:
+        xfs_buf_free(bp);
         return error;
 }
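
The pattern this commit relies on is worth spelling out: once the teardown
helper (here xfs_buf_free()) can cope with a partially constructed object,
every failure path can share a single cleanup call. A generic userspace
sketch of the idiom, using hypothetical thing_* names rather than XFS APIs:

#include <stdlib.h>

struct thing {
        void *a;
        void *b;
};

/* Teardown tolerates partially built objects: free(NULL) is a no-op. */
static void thing_free(struct thing *t)
{
        free(t->b);
        free(t->a);
        free(t);
}

static struct thing *thing_alloc(void)
{
        struct thing *t = calloc(1, sizeof(*t));

        if (!t)
                return NULL;
        t->a = malloc(64);
        if (!t->a)
                goto fail;
        t->b = malloc(64);
        if (!t->b)
                goto fail;
        return t;
fail:
        thing_free(t);          /* one label, one cleanup path */
        return NULL;
}
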
From c9fa563072e13337713a441cf30171feb4e96e6d Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Tue, 1 Jun 2021 13:40:36 +1000
Subject: xfs: use alloc_pages_bulk_array() for buffers

Allocate the pages for a buffer with alloc_pages_bulk_array() because
it's more efficient than allocating pages one at a time in a loop.

Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 62 ++++++++++++++++++++++----------------------------------
 1 file changed, 24 insertions(+), 38 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index b1610115d401..2749bc0bf726 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -386,10 +386,7 @@ xfs_buf_alloc_pages(
         xfs_buf_flags_t flags)
 {
         gfp_t gfp_mask = xb_to_gfp(flags);
-        size_t size;
-        size_t offset;
-        size_t nbytes;
-        int i;
+        long filled = 0;
         int error;

         /* Assure zeroed buffer for non-read cases. */
@@ -400,50 +397,39 @@ xfs_buf_alloc_pages(
         if (unlikely(error))
                 return error;

-        offset = bp->b_offset;
         bp->b_flags |= _XBF_PAGES;

-        for (i = 0; i < bp->b_page_count; i++) {
-                struct page *page;
-                uint retries = 0;
-retry:
-                page = alloc_page(gfp_mask);
-                if (unlikely(page == NULL)) {
-                        if (flags & XBF_READ_AHEAD) {
-                                bp->b_page_count = i;
-                                error = -ENOMEM;
-                                goto out_free_pages;
-                        }
+        /*
+         * Bulk filling of pages can take multiple calls. Not filling the entire
+         * array is not an allocation failure, so don't back off if we get at
+         * least one extra page.
+         */
+        for (;;) {
+                long last = filled;

-                        /*
-                         * This could deadlock.
-                         *
-                         * But until all the XFS lowlevel code is revamped to
-                         * handle buffer allocation failures we can't do much.
-                         */
-                        if (!(++retries % 100))
-                                xfs_err(NULL,
-                "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
-                                        current->comm, current->pid,
-                                        __func__, gfp_mask);
-
-                        XFS_STATS_INC(bp->b_mount, xb_page_retries);
-                        congestion_wait(BLK_RW_ASYNC, HZ/50);
-                        goto retry;
+                filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
+                                                bp->b_pages);
+                if (filled == bp->b_page_count) {
+                        XFS_STATS_INC(bp->b_mount, xb_page_found);
+                        break;
                 }

-                XFS_STATS_INC(bp->b_mount, xb_page_found);
+                if (filled != last)
+                        continue;

-                nbytes = min_t(size_t, size, PAGE_SIZE - offset);
-                size -= nbytes;
-                bp->b_pages[i] = page;
-                offset = 0;
+                if (flags & XBF_READ_AHEAD) {
+                        error = -ENOMEM;
+                        goto out_free_pages;
+                }
+
+                XFS_STATS_INC(bp->b_mount, xb_page_retries);
+                congestion_wait(BLK_RW_ASYNC, HZ / 50);
         }
         return 0;

 out_free_pages:
-        for (i = 0; i < bp->b_page_count; i++)
-                __free_page(bp->b_pages[i]);
+        while (--filled >= 0)
+                __free_page(bp->b_pages[filled]);
         bp->b_flags &= ~_XBF_PAGES;
         return error;
 }
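
The retry policy in the new loop is the key detail: a bulk allocator may
legitimately return a partially filled array, so the code only backs off when
a call makes no progress at all. A self-contained userspace model of the same
policy, where bulk_alloc() is a hypothetical stand-in for
alloc_pages_bulk_array() and usleep() stands in for congestion_wait():

#include <stdlib.h>
#include <unistd.h>

/*
 * Hypothetical bulk allocator: fill NULL slots in array[0..count) and
 * return how many leading slots are now populated. Stopping early is a
 * partial fill, not a hard failure.
 */
static long bulk_alloc(void **array, long count)
{
        long i;

        for (i = 0; i < count; i++) {
                if (!array[i])
                        array[i] = malloc(4096);
                if (!array[i])
                        return i;
        }
        return count;
}

static void fill_array(void **array, long count)
{
        long filled = 0;

        for (;;) {
                long last = filled;

                filled = bulk_alloc(array, count);
                if (filled == count)
                        break;          /* array fully populated */
                if (filled != last)
                        continue;       /* progress made - retry at once */
                usleep(20000);          /* no progress - back off, ~HZ/50 */
        }
}
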
From 02c5117386884e06b6e78b72288f1e0af4320dc1 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Tue, 1 Jun 2021 13:40:36 +1000
Subject: xfs: merge _xfs_buf_get_pages()

Only called from one place now, so merge it into xfs_buf_alloc_pages().
Because page array allocation is dependent on bp->b_pages being null,
ensure that we always set bp->b_pages back to null when the page array
is freed.

Also convert the page array to use kmalloc() rather than kmem_alloc()
so we can use the gfp flags we've already calculated for the
allocation context instead of hard coding KM_NOFS semantics.

Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 48 ++++++++++++++----------------------------------
 1 file changed, 14 insertions(+), 34 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2749bc0bf726..a6fcd829c1ea 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -272,31 +272,6 @@ _xfs_buf_alloc(
         return 0;
 }

-/*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
-        struct xfs_buf *bp,
-        int page_count)
-{
-        /* Make sure that we have a page list */
-        if (bp->b_pages == NULL) {
-                bp->b_page_count = page_count;
-                if (page_count <= XB_PAGES) {
-                        bp->b_pages = bp->b_page_array;
-                } else {
-                        bp->b_pages = kmem_alloc(sizeof(struct page *) *
-                                                page_count, KM_NOFS);
-                        if (bp->b_pages == NULL)
-                                return -ENOMEM;
-                }
-                memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
-        }
-        return 0;
-}
-
 /*
  * Frees b_pages if it was allocated.
  */
@@ -304,10 +279,9 @@ STATIC void
 _xfs_buf_free_pages(
         struct xfs_buf *bp)
 {
-        if (bp->b_pages != bp->b_page_array) {
+        if (bp->b_pages != bp->b_page_array)
                 kmem_free(bp->b_pages);
-                bp->b_pages = NULL;
-        }
+        bp->b_pages = NULL;
 }

 /*
@@ -389,16 +363,22 @@ xfs_buf_alloc_pages(
         long filled = 0;
         int error;

+        /* Make sure that we have a page list */
+        bp->b_page_count = page_count;
+        if (bp->b_page_count <= XB_PAGES) {
+                bp->b_pages = bp->b_page_array;
+        } else {
+                bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
+                                      gfp_mask);
+                if (!bp->b_pages)
+                        return -ENOMEM;
+        }
+        bp->b_flags |= _XBF_PAGES;
+
         /* Assure zeroed buffer for non-read cases. */
         if (!(flags & XBF_READ))
                 gfp_mask |= __GFP_ZERO;

-        error = _xfs_buf_get_pages(bp, page_count);
-        if (unlikely(error))
-                return error;
-
-        bp->b_flags |= _XBF_PAGES;
-
         /*
          * Bulk filling of pages can take multiple calls. Not filling the entire
          * array is not an allocation failure, so don't back off if we get at
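
The merged code keeps the small-buffer optimisation that
_xfs_buf_get_pages() provided: page lists that fit in b_page_array live
inside struct xfs_buf itself, and only larger ones pay for a separate heap
allocation. A sketch of that embedded-array pattern with illustrative names
(INLINE_SLOTS plays the role of XB_PAGES):

#include <stdlib.h>
#include <string.h>

#define INLINE_SLOTS 4                  /* analogous to XB_PAGES */

struct buf {
        void **pages;                   /* inline_pages or heap array */
        int npages;
        void *inline_pages[INLINE_SLOTS];
};

static int buf_init_pages(struct buf *bp, int npages)
{
        bp->npages = npages;
        if (npages <= INLINE_SLOTS) {
                memset(bp->inline_pages, 0, sizeof(bp->inline_pages));
                bp->pages = bp->inline_pages;
        } else {
                bp->pages = calloc(npages, sizeof(*bp->pages));
                if (!bp->pages)
                        return -1;
        }
        return 0;
}

static void buf_free_pages(struct buf *bp)
{
        if (bp->pages != bp->inline_pages)
                free(bp->pages);
        bp->pages = NULL;       /* always reset, as the commit requires */
}
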
From e7d236a6fe5102092c463112124cf52e4d71885e Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Tue, 1 Jun 2021 13:40:36 +1000
Subject: xfs: move page freeing into _xfs_buf_free_pages()

Rather than open coding it just before we call _xfs_buf_free_pages().
Also, rename the function to xfs_buf_free_pages() as the leading
underscore has no useful meaning.

Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 61 +++++++++++++++++++++-----------------------------------
 1 file changed, 23 insertions(+), 38 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a6fcd829c1ea..d02edb683cfd 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -272,25 +272,30 @@ _xfs_buf_alloc(
         return 0;
 }

-/*
- * Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
+static void
+xfs_buf_free_pages(
         struct xfs_buf *bp)
 {
+        uint i;
+
+        ASSERT(bp->b_flags & _XBF_PAGES);
+
+        if (xfs_buf_is_vmapped(bp))
+                vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
+
+        for (i = 0; i < bp->b_page_count; i++) {
+                if (bp->b_pages[i])
+                        __free_page(bp->b_pages[i]);
+        }
+        if (current->reclaim_state)
+                current->reclaim_state->reclaimed_slab += bp->b_page_count;
+
         if (bp->b_pages != bp->b_page_array)
                 kmem_free(bp->b_pages);
         bp->b_pages = NULL;
+        bp->b_flags &= ~_XBF_PAGES;
 }

-/*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash - use xfs_buf_rele instead for
- * hashed and refcounted buffers
- */
 static void
 xfs_buf_free(
         struct xfs_buf *bp)
@@ -299,24 +304,11 @@ xfs_buf_free(
         ASSERT(list_empty(&bp->b_lru));

-        if (bp->b_flags & _XBF_PAGES) {
-                uint i;
-
-                if (xfs_buf_is_vmapped(bp))
-                        vm_unmap_ram(bp->b_addr - bp->b_offset,
-                                        bp->b_page_count);
-
-                for (i = 0; i < bp->b_page_count; i++) {
-                        struct page *page = bp->b_pages[i];
-
-                        __free_page(page);
-                }
-                if (current->reclaim_state)
-                        current->reclaim_state->reclaimed_slab +=
-                                bp->b_page_count;
-        } else if (bp->b_flags & _XBF_KMEM)
+        if (bp->b_flags & _XBF_PAGES)
+                xfs_buf_free_pages(bp);
+        else if (bp->b_flags & _XBF_KMEM)
                 kmem_free(bp->b_addr);
-        _xfs_buf_free_pages(bp);
+
         xfs_buf_free_maps(bp);
         kmem_cache_free(xfs_buf_zone, bp);
 }
@@ -361,7 +353,6 @@ xfs_buf_alloc_pages(
 {
         gfp_t gfp_mask = xb_to_gfp(flags);
         long filled = 0;
-        int error;

         /* Make sure that we have a page list */
         bp->b_page_count = page_count;
@@ -398,20 +389,14 @@ xfs_buf_alloc_pages(
                         continue;

                 if (flags & XBF_READ_AHEAD) {
-                        error = -ENOMEM;
-                        goto out_free_pages;
+                        xfs_buf_free_pages(bp);
+                        return -ENOMEM;
                 }

                 XFS_STATS_INC(bp->b_mount, xb_page_retries);
                 congestion_wait(BLK_RW_ASYNC, HZ / 50);
         }
         return 0;
-
-out_free_pages:
-        while (--filled >= 0)
-                __free_page(bp->b_pages[filled]);
-        bp->b_flags &= ~_XBF_PAGES;
-        return error;
 }
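
Note what the consolidation buys: because the bulk allocator can leave NULL
holes in the page array, the single teardown helper has to skip them, and
once it does, both the allocation error path and the final free path can
call it safely. The shape of that NULL-tolerant loop, in userspace terms:

#include <stdlib.h>

/* Free a possibly partially filled page array, NULL slots included. */
static void free_page_array(void **pages, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                free(pages[i]);         /* free(NULL) is a no-op in C */
                pages[i] = NULL;
        }
}
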
From 54cd3aa6f8102f4648190fc93eb5dd8603de9b52 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 7 Jun 2021 11:49:50 +1000
Subject: xfs: remove ->b_offset handling for page backed buffers

->b_offset can only be non-zero for _XBF_KMEM backed buffers, so remove
all code dealing with it for page backed buffers.

Signed-off-by: Christoph Hellwig
[dgc: modified to fit this patchset]
Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 8 +++-----
 fs/xfs/xfs_buf.h | 3 ++-
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d02edb683cfd..f901a74d6176 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -79,7 +79,7 @@ static inline int
 xfs_buf_vmap_len(
         struct xfs_buf *bp)
 {
-        return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+        return (bp->b_page_count * PAGE_SIZE);
 }

 /*
@@ -281,7 +281,7 @@ xfs_buf_free_pages(
         ASSERT(bp->b_flags & _XBF_PAGES);

         if (xfs_buf_is_vmapped(bp))
-                vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
+                vm_unmap_ram(bp->b_addr, bp->b_page_count);

         for (i = 0; i < bp->b_page_count; i++) {
                 if (bp->b_pages[i])
@@ -442,7 +442,7 @@ _xfs_buf_map_pages(
         ASSERT(bp->b_flags & _XBF_PAGES);
         if (bp->b_page_count == 1) {
                 /* A single page buffer is always mappable */
-                bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+                bp->b_addr = page_address(bp->b_pages[0]);
         } else if (flags & XBF_UNMAPPED) {
                 bp->b_addr = NULL;
         } else {
@@ -469,7 +469,6 @@ _xfs_buf_map_pages(
                 if (!bp->b_addr)
                         return -ENOMEM;
-                bp->b_addr += bp->b_offset;
         }

         return 0;
@@ -1680,7 +1679,6 @@ xfs_buf_offset(
         if (bp->b_addr)
                 return bp->b_addr + offset;

-        offset += bp->b_offset;
         page = bp->b_pages[offset >> PAGE_SHIFT];
         return page_address(page) + (offset & (PAGE_SIZE-1));
 }
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 459ca34f26f5..464dc548fa23 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -167,7 +167,8 @@ struct xfs_buf {
         atomic_t b_pin_count;           /* pin count */
         atomic_t b_io_remaining;        /* #outstanding I/O requests */
         unsigned int b_page_count;      /* size of page array */
-        unsigned int b_offset;          /* page offset in first page */
+        unsigned int b_offset;          /* page offset of b_addr,
+                                           only for _XBF_KMEM buffers */
         int b_error;                    /* error code on I/O */

         /*
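
With b_offset gone for page backed buffers, translating a logical buffer
offset into an address is pure shift-and-mask arithmetic over the page
array, as the simplified xfs_buf_offset() above shows. A stand-alone sketch
of that lookup, assuming 4 KiB pages:

#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* Map a byte offset to an address within an array of page pointers. */
static void *buf_offset(void **pages, size_t offset)
{
        char *page = pages[offset >> PAGE_SHIFT];

        return page + (offset & (PAGE_SIZE - 1));
}
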
From 934d1076bb2c5bbb3d5b0e3892b208d1f537949d Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 7 Jun 2021 11:50:00 +1000
Subject: xfs: simplify the b_page_count calculation

Ever since we stopped using the Linux page cache to back XFS buffers
there is no need to take the start sector into account for calculating
the number of pages in a buffer, as the data always starts from the
beginning of the buffer.

Signed-off-by: Christoph Hellwig
[dgc: modified to suit this series]
Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f901a74d6176..5c6a7140d28d 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -348,14 +348,13 @@ xfs_buf_alloc_kmem(
 static int
 xfs_buf_alloc_pages(
         struct xfs_buf *bp,
-        uint page_count,
         xfs_buf_flags_t flags)
 {
         gfp_t gfp_mask = xb_to_gfp(flags);
         long filled = 0;

         /* Make sure that we have a page list */
-        bp->b_page_count = page_count;
+        bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
         if (bp->b_page_count <= XB_PAGES) {
                 bp->b_pages = bp->b_page_array;
         } else {
@@ -409,7 +408,6 @@ xfs_buf_allocate_memory(
         uint flags)
 {
         size_t size;
-        xfs_off_t start, end;
         int error;

         /*
@@ -424,11 +422,7 @@ xfs_buf_allocate_memory(
                 if (!error)
                         return 0;
         }
-
-        start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
-        end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
-                                                        >> PAGE_SHIFT;
-        return xfs_buf_alloc_pages(bp, end - start, flags);
+        return xfs_buf_alloc_pages(bp, flags);
 }

 /*
@@ -922,7 +916,6 @@ xfs_buf_get_uncached(
         int flags,
         struct xfs_buf **bpp)
 {
-        unsigned long page_count;
         int error;
         struct xfs_buf *bp;
         DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
@@ -934,8 +927,7 @@ xfs_buf_get_uncached(
         if (error)
                 return error;

-        page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
-        error = xfs_buf_alloc_pages(bp, page_count, flags);
+        error = xfs_buf_alloc_pages(bp, flags);
         if (error)
                 goto fail_free_buf;

From 289ae7b48c2c4d9bec515e720c01146498109dee Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Mon, 7 Jun 2021 11:50:17 +1000
Subject: xfs: get rid of xb_to_gfp()

Only used in one place, so just open code the logic in the macro.
Based on a patch from Christoph Hellwig.

Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 5c6a7140d28d..0b0d66d31515 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -22,9 +22,6 @@

 static kmem_zone_t *xfs_buf_zone;

-#define xb_to_gfp(flags) \
-        ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
-
 /*
  * Locking orders
  *
@@ -350,9 +347,14 @@ xfs_buf_alloc_pages(
         struct xfs_buf *bp,
         xfs_buf_flags_t flags)
 {
-        gfp_t gfp_mask = xb_to_gfp(flags);
+        gfp_t gfp_mask = __GFP_NOWARN;
         long filled = 0;

+        if (flags & XBF_READ_AHEAD)
+                gfp_mask |= __GFP_NORETRY;
+        else
+                gfp_mask |= GFP_NOFS;
+
         /* Make sure that we have a page list */
         bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
         if (bp->b_page_count <= XB_PAGES) {
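
The page-count simplification two commits above reduces to a single rounding
division: once buffer data always starts at offset zero within the first
page, DIV_ROUND_UP() over the byte length replaces the old start/end block
arithmetic. A trivial demonstration, assuming a hardcoded 4 KiB page size:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long len = 6 * 4096 + 512;     /* arbitrary buffer size */

        /* 6 full pages plus 512 bytes needs 7 pages. */
        printf("pages needed: %lu\n", DIV_ROUND_UP(len, PAGE_SIZE));
        return 0;
}
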
From 170041f71596dad3f34dea40ee0ef0c848d3f906 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 7 Jun 2021 11:50:47 +1000
Subject: xfs: cleanup error handling in xfs_buf_get_map

Use a single goto label for freeing the buffer and returning an error.

Signed-off-by: Christoph Hellwig
Reviewed-by: Darrick J. Wong
Signed-off-by: Dave Chinner
---
 fs/xfs/xfs_buf.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 0b0d66d31515..7dea73535959 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -689,16 +689,12 @@ xfs_buf_get_map(
                 return error;

         error = xfs_buf_allocate_memory(new_bp, flags);
-        if (error) {
-                xfs_buf_free(new_bp);
-                return error;
-        }
+        if (error)
+                goto out_free_buf;

         error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
-        if (error) {
-                xfs_buf_free(new_bp);
-                return error;
-        }
+        if (error)
+                goto out_free_buf;

         if (bp != new_bp)
                 xfs_buf_free(new_bp);
@@ -726,6 +722,9 @@ found:
         trace_xfs_buf_get(bp, flags, _RET_IP_);
         *bpp = bp;
         return 0;
+out_free_buf:
+        xfs_buf_free(new_bp);
+        return error;
 }

 int

From 8bcac7448a942fa4662441a310c97d47cec24310 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Mon, 7 Jun 2021 11:50:48 +1000
Subject: xfs: merge xfs_buf_allocate_memory

It only has one caller and is now a simple function, so merge it into
the caller.

Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 44 +++++++++++++------------------------------
 1 file changed, 13 insertions(+), 31 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 7dea73535959..a55471612150 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -313,11 +313,11 @@ xfs_buf_free(
 static int
 xfs_buf_alloc_kmem(
         struct xfs_buf *bp,
-        size_t size,
         xfs_buf_flags_t flags)
 {
         int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
         xfs_km_flags_t kmflag_mask = KM_NOFS;
+        size_t size = BBTOB(bp->b_length);

         /* Assure zeroed buffer for non-read cases. */
         if (!(flags & XBF_READ))
@@ -400,33 +400,6 @@ xfs_buf_alloc_pages(
         return 0;
 }

-
-/*
- * Allocates all the pages for buffer in question and builds it's page list.
- */
-static int
-xfs_buf_allocate_memory(
-        struct xfs_buf *bp,
-        uint flags)
-{
-        size_t size;
-        int error;
-
-        /*
-         * For buffers that fit entirely within a single page, first attempt to
-         * allocate the memory from the heap to minimise memory usage. If we
-         * can't get heap memory for these small buffers, we fall back to using
-         * the page allocator.
-         */
-        size = BBTOB(bp->b_length);
-        if (size < PAGE_SIZE) {
-                error = xfs_buf_alloc_kmem(bp, size, flags);
-                if (!error)
-                        return 0;
-        }
-        return xfs_buf_alloc_pages(bp, flags);
-}
-
 /*
  * Map buffer into kernel address-space if necessary.
  */
@@ -688,9 +661,18 @@ xfs_buf_get_map(
         if (error)
                 return error;

-        error = xfs_buf_allocate_memory(new_bp, flags);
-        if (error)
-                goto out_free_buf;
+        /*
+         * For buffers that fit entirely within a single page, first attempt to
+         * allocate the memory from the heap to minimise memory usage. If we
+         * can't get heap memory for these small buffers, we fall back to using
+         * the page allocator.
+         */
+        if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+            xfs_buf_alloc_kmem(new_bp, flags) < 0) {
+                error = xfs_buf_alloc_pages(new_bp, flags);
+                if (error)
+                        goto out_free_buf;
+        }

         error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
         if (error)
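
The final call-site shape folds the whole "try the heap, fall back to pages"
policy into one short-circuit condition. A userspace sketch of the same
control flow, with buf_alloc_heap()/buf_alloc_pages() as hypothetical
stand-ins for xfs_buf_alloc_kmem()/xfs_buf_alloc_pages():

#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct buf {
        void *addr;
};

static int buf_alloc_heap(struct buf *bp, size_t size)
{
        bp->addr = malloc(size);
        return bp->addr ? 0 : -1;
}

static int buf_alloc_pages(struct buf *bp, size_t size)
{
        /* Page-aligned fallback, standing in for the page allocator. */
        return posix_memalign(&bp->addr, PAGE_SIZE, size) ? -1 : 0;
}

static int alloc_buf_memory(struct buf *bp, size_t size)
{
        /*
         * Small buffers try the heap first; if that fails, or the
         * buffer is at least a page, use the page-based path.
         */
        if (size >= PAGE_SIZE || buf_alloc_heap(bp, size) < 0)
                return buf_alloc_pages(bp, size);
        return 0;
}

Because || short-circuits, the heap allocator is never even called for
buffers of a page or more, which is exactly the behaviour the merged
xfs_buf_get_map() code above preserves.
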