author | Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> | 2019-08-26 12:06:22 -0700
committer | Darrick J. Wong <darrick.wong@oracle.com> | 2019-08-26 12:06:22 -0700
commit | 707e0ddaf67e8942448ebdd16b523e409ebe40ce (patch)
tree | 081fe5a9335bd0856fc979e62ec18803623bdf6f
parent | a55aa89aab90fae7c815b0551b07be37db359d76 (diff)
download | lwn-707e0ddaf67e8942448ebdd16b523e409ebe40ce.tar.gz lwn-707e0ddaf67e8942448ebdd16b523e409ebe40ce.zip
fs: xfs: Remove KM_NOSLEEP and KM_SLEEP.
Since no caller is using KM_NOSLEEP and no callee branches on KM_SLEEP,
we can remove KM_NOSLEEP and replace KM_SLEEP with 0.
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
46 files changed, 102 insertions, 109 deletions
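For orientation before the full diff: after the patch, sleeping is simply the default allocation behaviour, so callers pass 0 (or only the remaining modifier flags) where they used to pass KM_SLEEP. Below is a sketch of what fs/xfs/kmem.h looks like once the hunks that follow are applied; it is reconstructed from those hunks, with comments added, and the trailing KM_MAYFAIL/KM_ZERO handling is untouched by this patch and shown only to round out the function.

```c
/* Sketch of fs/xfs/kmem.h after the patch (reconstructed from the hunks
 * below; layout approximate). Only the modifier flags survive --
 * "may sleep" no longer needs its own bit.
 */
typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO));

	/*
	 * The GFP_ATOMIC path for KM_NOSLEEP is gone: always start from
	 * GFP_KERNEL and strip __GFP_FS when the caller asked for NOFS.
	 */
	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/* Unchanged by this patch, shown as context. */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;
	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}
```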
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 16bb9a328678..7cd315ad937e 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -17,7 +17,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
do {
ptr = kmalloc(size, lflags);
- if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+ if (ptr || (flags & KM_MAYFAIL))
return ptr;
if (!(++retries % 100))
xfs_err(NULL,
@@ -67,7 +67,7 @@ kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
do {
ptr = krealloc(old, newsize, lflags);
- if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+ if (ptr || (flags & KM_MAYFAIL))
return ptr;
if (!(++retries % 100))
xfs_err(NULL,
@@ -87,7 +87,7 @@ kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
do {
ptr = kmem_cache_alloc(zone, lflags);
- if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+ if (ptr || (flags & KM_MAYFAIL))
return ptr;
if (!(++retries % 100))
xfs_err(NULL,
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 267655acd426..cb6fa7984ffa 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -16,8 +16,6 @@
*/
typedef unsigned __bitwise xfs_km_flags_t;
-#define KM_SLEEP ((__force xfs_km_flags_t)0x0001u)
-#define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS ((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO ((__force xfs_km_flags_t)0x0010u)
@@ -32,15 +30,11 @@ kmem_flags_convert(xfs_km_flags_t flags)
{
gfp_t lflags;
- BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
+ BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));
- if (flags & KM_NOSLEEP) {
- lflags = GFP_ATOMIC | __GFP_NOWARN;
- } else {
- lflags = GFP_KERNEL | __GFP_NOWARN;
- if (flags & KM_NOFS)
- lflags &= ~__GFP_FS;
- }
+ lflags = GFP_KERNEL | __GFP_NOWARN;
+ if (flags & KM_NOFS)
+ lflags &= ~__GFP_FS;
/*
* Default page/slab allocator behavior is to retry for ever
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 372ad55631fc..533b04aaf6f6 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2205,7 +2205,7 @@ xfs_defer_agfl_block(
ASSERT(xfs_bmap_free_item_zone != NULL);
ASSERT(oinfo != NULL);
- new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+ new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
new->xefi_blockcount = 1;
new->xefi_oinfo = *oinfo;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 70eb941d02e4..1408638c21c5 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -782,7 +782,7 @@ xfs_attr_shortform_to_leaf(
ifp = dp->i_afp;
sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
size = be16_to_cpu(sf->hdr.totsize);
- tmpbuffer = kmem_alloc(size, KM_SLEEP);
+ tmpbuffer = kmem_alloc(size, 0);
ASSERT(tmpbuffer != NULL);
memcpy(tmpbuffer, ifp->if_u1.if_data, size);
sf = (xfs_attr_shortform_t *)tmpbuffer;
@@ -985,7 +985,7 @@ xfs_attr3_leaf_to_shortform(
trace_xfs_attr_leaf_to_sf(args);
- tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+ tmpbuffer = kmem_alloc(args->geo->blksize, 0);
if (!tmpbuffer)
return -ENOMEM;
@@ -1448,7 +1448,7 @@ xfs_attr3_leaf_compact(
trace_xfs_attr_leaf_compact(args);
- tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+ tmpbuffer = kmem_alloc(args->geo->blksize, 0);
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
memset(bp->b_addr, 0, args->geo->blksize);
leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -2167,7 +2167,7 @@ xfs_attr3_leaf_unbalance(
struct xfs_attr_leafblock *tmp_leaf;
struct xfs_attr3_icleaf_hdr tmphdr;
- tmp_leaf = kmem_zalloc(state->args->geo->blksize, KM_SLEEP);
+ tmp_leaf = kmem_zalloc(state->args->geo->blksize, 0);
/*
* Copy the header into the temp leaf so that all the stuff
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 07aad70f3931..65f4348af9ae 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -553,7 +553,7 @@ __xfs_bmap_add_free(
#endif
ASSERT(xfs_bmap_free_item_zone != NULL);
- new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+ new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
new->xefi_startblock = bno;
new->xefi_blockcount = (xfs_extlen_t)len;
if (oinfo)
@@ -1099,7 +1099,7 @@ xfs_bmap_add_attrfork(
if (error)
goto trans_cancel;
ASSERT(ip->i_afp == NULL);
- ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+ ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
switch (ip->i_d.di_format) {
@@ -6094,7 +6094,7 @@ __xfs_bmap_add(
bmap->br_blockcount,
bmap->br_state);
- bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
+ bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
INIT_LIST_HEAD(&bi->bi_list);
bi->bi_type = type;
bi->bi_owner = ip;
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 0bf56e94bfe9..4fd1223c1bd5 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2098,7 +2098,7 @@ xfs_da_grow_inode_int(
* If we didn't get it and the block might work if fragmented,
* try without the CONTIG flag. Loop until we get it all.
*/
- mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
+ mapp = kmem_alloc(sizeof(*mapp) * count, 0);
for (b = *bno, mapi = 0; b < *bno + count; ) {
nmap = min(XFS_BMAP_MAX_NMAP, count);
c = (int)(*bno + count - b);
@@ -2480,7 +2480,7 @@ xfs_buf_map_from_irec(
if (nirecs > 1) {
map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
if (!map)
return -ENOMEM;
*mapp = map;
@@ -2539,7 +2539,7 @@ xfs_dabuf_map(
*/
if (nfsb != 1)
irecs = kmem_zalloc(sizeof(irec) * nfsb,
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
nirecs = nfsb;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index eb2be2a6a25a..22557527cfdb 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -517,7 +517,7 @@ xfs_defer_add(
}
if (!dfp) {
dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
dfp->dfp_type = type;
dfp->dfp_intent = NULL;
dfp->dfp_done = NULL;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 67840723edbb..867c5dee0751 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -110,9 +110,9 @@ xfs_da_mount(
nodehdr_size = mp->m_dir_inode_ops->node_hdr_size;
mp->m_dir_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
mp->m_attr_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!mp->m_dir_geo || !mp->m_attr_geo) {
kmem_free(mp->m_dir_geo);
kmem_free(mp->m_attr_geo);
@@ -217,7 +217,7 @@ xfs_dir_init(
if (error)
return error;
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
@@ -254,7 +254,7 @@ xfs_dir_createname(
XFS_STATS_INC(dp->i_mount, xs_dir_create);
}
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
@@ -353,7 +353,7 @@ xfs_dir_lookup(
* lockdep Doing this avoids having to add a bunch of lockdep class
* annotations into the reclaim path for the ilock.
*/
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
args->geo = dp->i_mount->m_dir_geo;
args->name = name->name;
args->namelen = name->len;
@@ -422,7 +422,7 @@ xfs_dir_removename(
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
XFS_STATS_INC(dp->i_mount, xs_dir_remove);
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
@@ -483,7 +483,7 @@ xfs_dir_replace(
if (rval)
return rval;
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+ args = kmem_zalloc(sizeof(*args), KM_NOFS);
if (!args)
return -ENOMEM;
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index a6fb0cc2085e..9595ced393dc 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -1092,7 +1092,7 @@ xfs_dir2_sf_to_block(
* Copy the directory into a temporary buffer.
* Then pitch the incore inode data so we can make extents.
*/
- sfp = kmem_alloc(ifp->if_bytes, KM_SLEEP);
+ sfp = kmem_alloc(ifp->if_bytes, 0);
memcpy(sfp, oldsfp, ifp->if_bytes);
xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 033589257f54..85f14fc2a8da 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -164,7 +164,7 @@ xfs_dir2_block_to_sf(
* can free the block and copy the formatted data into the inode literal
* area.
*/
- dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP);
+ dst = kmem_alloc(mp->m_sb.sb_inodesize, 0);
hdr = bp->b_addr;
/*
@@ -436,7 +436,7 @@ xfs_dir2_sf_addname_hard(
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
old_isize = (int)dp->i_d.di_size;
- buf = kmem_alloc(old_isize, KM_SLEEP);
+ buf = kmem_alloc(old_isize, 0);
oldsfp = (xfs_dir2_sf_hdr_t *)buf;
memcpy(oldsfp, sfp, old_isize);
/*
@@ -1096,7 +1096,7 @@ xfs_dir2_sf_toino4(
* Don't want xfs_idata_realloc copying the data here.
*/
oldsize = dp->i_df.if_bytes;
- buf = kmem_alloc(oldsize, KM_SLEEP);
+ buf = kmem_alloc(oldsize, 0);
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->i8count == 1);
memcpy(buf, oldsfp, oldsize);
@@ -1169,7 +1169,7 @@ xfs_dir2_sf_toino8(
* Don't want xfs_idata_realloc copying the data here.
*/
oldsize = dp->i_df.if_bytes;
- buf = kmem_alloc(oldsize, KM_SLEEP);
+ buf = kmem_alloc(oldsize, 0);
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->i8count == 0);
memcpy(buf, oldsfp, oldsize);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index bf3e04018246..c643beeb5a24 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -94,7 +94,7 @@ xfs_iformat_fork(
return 0;
ASSERT(ip->i_afp == NULL);
- ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
+ ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS);
switch (dip->di_aformat) {
case XFS_DINODE_FMT_LOCAL:
@@ -147,7 +147,7 @@ xfs_init_local_fork(
if (size) {
real_size = roundup(mem_size, 4);
- ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
+ ifp->if_u1.if_data = kmem_alloc(real_size, KM_NOFS);
memcpy(ifp->if_u1.if_data, data, size);
if (zero_terminate)
ifp->if_u1.if_data[size] = '\0';
@@ -302,7 +302,7 @@ xfs_iformat_btree(
}
ifp->if_broot_bytes = size;
- ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
+ ifp->if_broot = kmem_alloc(size, KM_NOFS);
ASSERT(ifp->if_broot != NULL);
/*
* Copy and convert from the on-disk structure
@@ -367,7 +367,7 @@ xfs_iroot_realloc(
*/
if (ifp->if_broot_bytes == 0) {
new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
- ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+ ifp->if_broot = kmem_alloc(new_size, KM_NOFS);
ifp->if_broot_bytes = (int)new_size;
return;
}
@@ -382,7 +382,7 @@ xfs_iroot_realloc(
new_max = cur_max + rec_diff;
new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@@ -408,7 +408,7 @@ xfs_iroot_realloc(
else
new_size = 0;
if (new_size > 0) {
- new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+ new_broot = kmem_alloc(new_size, KM_NOFS);
/*
* First copy over the btree block header.
*/
@@ -492,7 +492,7 @@ xfs_idata_realloc(
* We enforce that here.
*/
ifp->if_u1.if_data = kmem_realloc(ifp->if_u1.if_data,
- roundup(new_size, 4), KM_SLEEP | KM_NOFS);
+ roundup(new_size, 4), KM_NOFS);
ifp->if_bytes = new_size;
}
@@ -683,7 +683,7 @@ xfs_ifork_init_cow(
return;
ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone,
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
ip->i_cowfp->if_flags = XFS_IFEXTENTS;
ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
ip->i_cnextents = 0;
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 51bb9bdb0e84..14b9e3e056cc 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1189,7 +1189,7 @@ __xfs_refcount_add(
blockcount);
ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
- KM_SLEEP | KM_NOFS);
+ KM_NOFS);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
ri->ri_startblock = startblock;
@@ -1602,7 +1602,7 @@ xfs_refcount_recover_extent(
if (be32_to_cpu(rec->refc.rc_refcount) != 1)
return -EFSCORRUPTED;
- rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP);
+ rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
list_add_tail(&rr->rr_list, debris);
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index e6aeb390b2fb..12a61f0c1e6a 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -2287,7 +2287,7 @@ __xfs_rmap_add(
bmap->br_blockcount,
bmap->br_state);
- ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
+ ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_NOFS);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
ri->ri_owner = owner;
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index 1afc58bf71dd..922a5154e2b8 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -80,7 +80,7 @@ xchk_setup_xattr(
* without the inode lock held, which means we can sleep.
*/
if (sc->flags & XCHK_TRY_HARDER) {
- error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, KM_SLEEP);
+ error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
if (error)
return error;
}
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index fc3f510c9034..98f82d7c8b40 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -125,7 +125,7 @@ xchk_setup_fscounters(
struct xchk_fscounters *fsc;
int error;
- sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), KM_SLEEP);
+ sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
if (!sc->buf)
return -ENOMEM;
fsc = sc->buf;
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
index 99c0b1234c3c..5641ae512c9e 100644
--- a/fs/xfs/scrub/symlink.c
+++ b/fs/xfs/scrub/symlink.c
@@ -22,7 +22,7 @@ xchk_setup_symlink(
struct xfs_inode *ip)
{
/* Allocate the buffer without the inode lock held. */
- sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, KM_SLEEP);
+ sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, 0);
if (!sc->buf)
return -ENOMEM;
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index cbda40d40326..86c0697870a5 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -135,7 +135,7 @@ xfs_get_acl(struct inode *inode, int type)
* go out to the disk.
*/
len = XFS_ACL_MAX_SIZE(ip->i_mount);
- xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
+ xfs_acl = kmem_zalloc_large(len, 0);
if (!xfs_acl)
return ERR_PTR(-ENOMEM);
@@ -180,7 +180,7 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
struct xfs_acl *xfs_acl;
int len = XFS_ACL_MAX_SIZE(ip->i_mount);
- xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
+ xfs_acl = kmem_zalloc_large(len, 0);
if (!xfs_acl)
return -ENOMEM;
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index dc93c51c17de..a640a285cc52 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -147,7 +147,7 @@ xfs_attr3_leaf_inactive(
* Allocate storage for a list of all the "remote" value extents.
*/
size = count * sizeof(xfs_attr_inactive_list_t);
- list = kmem_alloc(size, KM_SLEEP);
+ list = kmem_alloc(size, 0);
/*
* Identify each of the "remote" value extents.
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 58fc820a70c6..00758fdc2fec 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -109,7 +109,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
* It didn't all fit, so we have to sort everything on hashval.
*/
sbsize = sf->hdr.count * sizeof(*sbuf);
- sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
+ sbp = sbuf = kmem_alloc(sbsize, KM_NOFS);
/*
* Scan the attribute list for the rest of the entries, storing
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 9fa4a7ee8cfc..989163e1f900 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -141,7 +141,7 @@ xfs_bui_init(
{
struct xfs_bui_log_item *buip;
- buip = kmem_zone_zalloc(xfs_bui_zone, KM_SLEEP);
+ buip = kmem_zone_zalloc(xfs_bui_zone, 0);
xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
@@ -218,7 +218,7 @@ xfs_trans_get_bud(
{
struct xfs_bud_log_item *budp;
- budp = kmem_zone_zalloc(xfs_bud_zone, KM_SLEEP);
+ budp = kmem_zone_zalloc(xfs_bud_zone, 0);
xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
&xfs_bud_item_ops);
budp->bud_buip = buip;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ca0849043f54..d3be9ab0359b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1741,7 +1741,7 @@ xfs_alloc_buftarg(
{
xfs_buftarg_t *btp;
- btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
+ btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 7dcaec54a20b..d74fbd1e9d3e 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -702,7 +702,7 @@ xfs_buf_item_get_format(
}
bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
- KM_SLEEP);
+ 0);
if (!bip->bli_formats)
return -ENOMEM;
return 0;
@@ -747,7 +747,7 @@ xfs_buf_item_init(
return 0;
}
- bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
+ bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF,
&xfs_buf_item_ops);
bip->bli_buf = bp;
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index fb1ad4483081..7ce770e779b4 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -440,7 +440,7 @@ xfs_dquot_alloc(
{
struct xfs_dquot *dqp;
- dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
+ dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0);
dqp->dq_flags = type;
dqp->q_core.d_id = cpu_to_be32(id);
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 282ec5af293e..d60647d7197b 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -347,7 +347,7 @@ xfs_qm_qoff_logitem_init(
{
struct xfs_qoff_logitem *qf;
- qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
+ qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), 0);
xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 544c9482a0ef..849fd4476950 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -213,7 +213,7 @@ xfs_errortag_init(
struct xfs_mount *mp)
{
mp->m_errortag = kmem_zalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX,
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!mp->m_errortag)
return -ENOMEM;
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 0ed68379e551..2183d87be4cf 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -33,7 +33,7 @@ xfs_extent_busy_insert(
struct rb_node **rbp;
struct rb_node *parent = NULL;
- new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP);
+ new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
new->agno = agno;
new->bno = bno;
new->length = len;
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 86f6512d6864..e44efc41a041 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -163,9 +163,9 @@ xfs_efi_init(
if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
size = (uint)(sizeof(xfs_efi_log_item_t) +
((nextents - 1) * sizeof(xfs_extent_t)));
- efip = kmem_zalloc(size, KM_SLEEP);
+ efip = kmem_zalloc(size, 0);
} else {
- efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP);
+ efip = kmem_zone_zalloc(xfs_efi_zone, 0);
}
xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
@@ -333,9 +333,9 @@ xfs_trans_get_efd(
if (nextents > XFS_EFD_MAX_FAST_EXTENTS) {
efdp = kmem_zalloc(sizeof(struct xfs_efd_log_item) +
(nextents - 1) * sizeof(struct xfs_extent),
- KM_SLEEP);
+ 0);
} else {
- efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP);
+ efdp = kmem_zone_zalloc(xfs_efd_zone, 0);
}
xfs_log_item_init(tp->t_mountp, &efdp->efd_item, XFS_LI_EFD,
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 0b0fd10a36d4..944add5ff8e0 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -40,7 +40,7 @@ xfs_inode_alloc(
* KM_MAYFAIL and return NULL here on ENOMEM. Set the
* code up to do this anyway.
*/
- ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
+ ip = kmem_zone_alloc(xfs_inode_zone, 0);
if (!ip)
return NULL;
if (inode_init_always(mp->m_super, VFS_I(ip))) {
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index d99a0a3e5f40..3ebd1b7f49d8 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -89,7 +89,7 @@ xfs_icreate_log(
{
struct xfs_icreate_item *icp;
- icp = kmem_zone_zalloc(xfs_icreate_zone, KM_SLEEP);
+ icp = kmem_zone_zalloc(xfs_icreate_zone, 0);
xfs_log_item_init(tp->t_mountp, &icp->ic_item, XFS_LI_ICREATE,
&xfs_icreate_item_ops);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6467d5e1df2d..cdb97fa027fa 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2018,7 +2018,7 @@ xfs_iunlink_add_backref(
if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
return 0;
- iu = kmem_zalloc(sizeof(*iu), KM_SLEEP | KM_NOFS);
+ iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
iu->iu_agino = prev_agino;
iu->iu_next_unlinked = this_agino;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c9a502eed204..bb8f076805b9 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -651,7 +651,7 @@ xfs_inode_item_init(
struct xfs_inode_log_item *iip;
ASSERT(ip->i_itemp == NULL);
- iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
+ iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, 0);
iip->ili_inode = ip;
xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6f7848cd5527..9ea51664932e 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -396,7 +396,7 @@ xfs_attrlist_by_handle(
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
+ kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
if (!kbuf)
goto out_dput;
@@ -434,7 +434,7 @@ xfs_attrmulti_attr_get(
if (*len > XFS_XATTR_SIZE_MAX)
return -EINVAL;
- kbuf = kmem_zalloc_large(*len, KM_SLEEP);
+ kbuf = kmem_zalloc_large(*len, 0);
if (!kbuf)
return -ENOMEM;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7bd7534f5051..1e08bf79b478 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -381,7 +381,7 @@ xfs_compat_attrlist_by_handle(
return PTR_ERR(dentry);
error = -ENOMEM;
- kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
+ kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
if (!kbuf)
goto out_dput;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f5c955d35be4..b049e7369a66 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -169,7 +169,7 @@ xfs_bulkstat_one(
ASSERT(breq->icount == 1);
bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!bc.buf)
return -ENOMEM;
@@ -243,7 +243,7 @@ xfs_bulkstat(
return 0;
bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
- KM_SLEEP | KM_MAYFAIL);
+ KM_MAYFAIL);
if (!bc.buf)
return -ENOMEM;
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 8c7d727149ea..86ce52c1871f 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -616,7 +616,7 @@ xfs_iwalk_threaded(
if (xfs_pwork_ctl_want_abort(&pctl))
break;
- iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), KM_SLEEP);
+ iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
iwag->mp = mp;
iwag->iwalk_fn = iwalk_fn;
iwag->data = data;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 7fc3c1ad36bc..50d854bfc45c 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -428,8 +428,7 @@ xfs_log_reserve(
XFS_STATS_INC(mp, xs_try_logspace);
ASSERT(*ticp == NULL);
- tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
- KM_SLEEP);
+ tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 0);
*ticp = tic;
xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index fa5602d0fd7f..ef652abd112c 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -38,7 +38,7 @@ xlog_cil_ticket_alloc(
struct xlog_ticket *tic;
tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
- KM_SLEEP|KM_NOFS);
+ KM_NOFS);
/*
* set the current reservation to zero so we know to steal the basic
@@ -186,7 +186,7 @@ xlog_cil_alloc_shadow_bufs(
*/
kmem_free(lip->li_lv_shadow);
- lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS);
+ lv = kmem_alloc_large(buf_size, KM_NOFS);
memset(lv, 0, xlog_cil_iovec_space(niovecs));
lv->lv_item = lip;
@@ -660,7 +660,7 @@ xlog_cil_push(
if (!cil)
return 0;
- new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
+ new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
new_ctx->ticket = xlog_cil_ticket_alloc(log);
down_write(&cil->xc_ctx_lock);
@@ -1179,11 +1179,11 @@ xlog_cil_init(
struct xfs_cil *cil;
struct xfs_cil_ctx *ctx;
- cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
+ cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
if (!cil)
return -ENOMEM;
- ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
+ ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
if (!ctx) {
kmem_free(cil);
return -ENOMEM;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 13d1d3e95b88..eafb36cb4c66 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1960,7 +1960,7 @@ xlog_recover_buffer_pass1(
}
}
- bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
+ bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
bcp->bc_blkno = buf_f->blf_blkno;
bcp->bc_len = buf_f->blf_len;
bcp->bc_refcount = 1;
@@ -2930,7 +2930,7 @@ xlog_recover_inode_pass2(
if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
in_f = item->ri_buf[0].i_addr;
} else {
- in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
+ in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
need_free = 1;
error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
if (error)
@@ -4161,7 +4161,7 @@ xlog_recover_add_item(
{
xlog_recover_item_t *item;
- item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
+ item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
INIT_LIST_HEAD(&item->ri_list);
list_add_tail(&item->ri_list, head);
}
@@ -4201,7 +4201,7 @@ xlog_recover_add_to_cont_trans(
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
- ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
+ ptr = kmem_realloc(old_ptr, len + old_len, 0);
memcpy(&ptr[old_len], dp, len);
item->ri_buf[item->ri_cnt-1].i_len += len;
item->ri_buf[item->ri_cnt-1].i_addr = ptr;
@@ -4261,7 +4261,7 @@ xlog_recover_add_to_trans(
return 0;
}
- ptr = kmem_alloc(len, KM_SLEEP);
+ ptr = kmem_alloc(len, 0);
memcpy(ptr, dp, len);
in_f = (struct xfs_inode_log_format *)ptr;
@@ -4289,7 +4289,7 @@ xlog_recover_add_to_trans(
item->ri_total = in_f->ilf_size;
item->ri_buf = kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
- KM_SLEEP);
+ 0);
}
ASSERT(item->ri_total > item->ri_cnt);
/* Description region is ri_buf[0] */
@@ -4423,7 +4423,7 @@ xlog_recover_ophdr_to_trans(
* This is a new transaction so allocate a new recovery container to
* hold the recovery ops that will follow.
*/
- trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
+ trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
trans->r_log_tid = tid;
trans->r_lsn = be64_to_cpu(rhead->h_lsn);
INIT_LIST_HEAD(&trans->r_itemq);
@@ -5527,7 +5527,7 @@ xlog_do_log_recovery(
*/
log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * sizeof(struct list_head),
- KM_SLEEP);
+ 0);
for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 322da6909290..da50b12ef634 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -82,7 +82,7 @@ xfs_uuid_mount(
if (hole < 0) {
xfs_uuid_table = kmem_realloc(xfs_uuid_table,
(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
- KM_SLEEP);
+ 0);
hole = xfs_uuid_table_size++;
}
xfs_uuid_table[hole] = *uuid;
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 74738813f60d..a06661dac5be 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -333,12 +333,12 @@ xfs_mru_cache_create(
if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
return -EINVAL;
- if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP)))
+ if (!(mru = kmem_zalloc(sizeof(*mru), 0)))
return -ENOMEM;
/* An extra list is needed to avoid reaping up to a grp_time early. */
mru->grp_count = grp_count + 1;
- mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+ mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0);
if (!mru->lists) {
err = -ENOMEM;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 5e7a37f0cf84..ecd8ce152ab1 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -642,7 +642,7 @@ xfs_qm_init_quotainfo(
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
- qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
+ qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), 0);
error = list_lru_init(&qinf->qi_lru);
if (error)
@@ -978,7 +978,7 @@ xfs_qm_reset_dqcounts_buf(
if (qip->i_d.di_nblocks == 0)
return 0;
- map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
+ map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
lblkno = 0;
maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index d8288aa0670a..db0e0d7cffb7 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -144,9 +144,9 @@ xfs_cui_init(
ASSERT(nextents > 0);
if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
- KM_SLEEP);
+ 0);
else
- cuip = kmem_zone_zalloc(xfs_cui_zone, KM_SLEEP);
+ cuip = kmem_zone_zalloc(xfs_cui_zone, 0);
xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
cuip->cui_format.cui_nextents = nextents;
@@ -223,7 +223,7 @@ xfs_trans_get_cud(
{
struct xfs_cud_log_item *cudp;
- cudp = kmem_zone_zalloc(xfs_cud_zone, KM_SLEEP);
+ cudp = kmem_zone_zalloc(xfs_cud_zone, 0);
xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
&xfs_cud_item_ops);
cudp->cud_cuip = cuip;
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 77ed557b6127..8939e0ea09cd 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -142,9 +142,9 @@ xfs_rui_init(
ASSERT(nextents > 0);
if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
- ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP);
+ ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
else
- ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);
+ ruip = kmem_zone_zalloc(xfs_rui_zone, 0);
xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI,
&xfs_rui_item_ops);
ruip->rui_format.rui_nextents = nextents;
@@ -244,7 +244,7 @@ xfs_trans_get_rud(
{
struct xfs_rud_log_item *rudp;
- rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
+ rudp = kmem_zone_zalloc(xfs_rud_zone, 0);
xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
&xfs_rud_item_ops);
rudp->rud_ruip = ruip;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 5fa4db3c3e32..4a48a8c75b4f 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -865,7 +865,7 @@ xfs_alloc_rsum_cache(
* lower bound on the minimum level with any free extents. We can
* continue without the cache if it couldn't be allocated.
*/
- mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, KM_SLEEP);
+ mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, 0);
if (!mp->m_rsum_cache)
xfs_warn(mp, "could not allocate realtime summary cache");
}
@@ -963,7 +963,7 @@ xfs_growfs_rt(
/*
* Allocate a new (fake) mount/sb.
*/
- nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
+ nmp = kmem_alloc(sizeof(*nmp), 0);
/*
* Loop over the bitmap blocks.
* We will do everything one bitmap block at a time.
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d42a68d8313b..f4795fdb7389 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -90,7 +90,7 @@ xfs_trans_dup(
trace_xfs_trans_dup(tp, _RET_IP_);
- ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ ntp = kmem_zone_zalloc(xfs_trans_zone, 0);
/*
* Initialize the new transaction structure.
@@ -263,7 +263,7 @@ xfs_trans_alloc(
* GFP_NOFS allocation context so that we avoid lockdep false positives
* by doing GFP_KERNEL allocations inside sb_start_intwrite().
*/
- tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ tp = kmem_zone_zalloc(xfs_trans_zone, 0);
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 1027c9ca6eb8..16457465833b 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -863,7 +863,7 @@ STATIC void xfs_trans_alloc_dqinfo(
xfs_trans_t *tp)
{
- tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
+ tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
}
void
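The caller-side conversion above is mechanical across all 46 files; the snippet below is a condensed before/after illustration of that pattern (not literal lines from any single file).

```c
/* Before the patch: blocking behaviour had to be requested explicitly. */
args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
new  = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);

/* After the patch: sleeping is the default, so only the remaining
 * modifier flags (KM_NOFS, KM_MAYFAIL, KM_ZERO) are passed, or 0 when
 * no modifier is needed.
 */
args = kmem_zalloc(sizeof(*args), KM_NOFS);
new  = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
```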