author    Daniel Borkmann <daniel@iogearbox.net>    2015-02-20 00:53:38 +0100
committer David S. Miller <davem@davemloft.net>    2015-02-20 17:38:09 -0500
commit    eb6d1abf1bd8bf1beb45b5401c8324bdb8f893c4 (patch)
tree      75f3fee2718ab7194dca0153ff765484b412401a /lib/rhashtable.c
parent    342100d937ed6e5faf1e7ee7dcd7b3935fec8877 (diff)
rhashtable: better high order allocation attempts
When allocating future tables via bucket_table_alloc(), it is overkill to
probe with kzalloc() unconditionally first for large table shifts, since
that attempt is likely to fail. Only probe with kzalloc() for more
reasonable table sizes, and use vzalloc() either as a fallback on failure
or directly for large table sizes.

Fixes: 7e1e77636e36 ("lib: Resizable, Scalable, Concurrent Hash Table")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
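The pattern the patch adopts is the usual kmalloc-then-vmalloc fallback: only
try a physically contiguous allocation while the request is at or below the
page allocator's "costly" order, and otherwise (or on failure) fall back to
vzalloc(). Below is a minimal sketch of that pattern outside the rhashtable
context, with hypothetical alloc_table()/free_table() helpers and assuming
ordinary GFP_KERNEL process context:

#include <linux/mm.h>      /* PAGE_SIZE, PAGE_ALLOC_COSTLY_ORDER */
#include <linux/slab.h>    /* kzalloc() */
#include <linux/vmalloc.h> /* vzalloc(), kvfree() */

/* Hypothetical helper illustrating the allocation strategy: probe
 * kzalloc() only for sizes the buddy allocator can satisfy cheaply,
 * without retries or failure warnings, and fall back to vzalloc()
 * for larger requests or when kzalloc() fails.
 */
static void *alloc_table(size_t size)
{
        void *p = NULL;

        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
                p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (!p)
                p = vzalloc(size);

        return p;
}

/* The matching free path must handle either backing allocator;
 * kvfree() dispatches to kfree() or vfree() as appropriate.
 */
static void free_table(void *p)
{
        kvfree(p);
}

__GFP_NORETRY keeps the probe from trying hard to satisfy a high-order
request that vzalloc() can serve anyway, and __GFP_NOWARN suppresses the
allocation-failure warning since the failure is handled by the fallback.
PAGE_ALLOC_COSTLY_ORDER is 3, so with 4 KiB pages the kzalloc() probe is
limited to requests of at most 32 KiB.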
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--    lib/rhashtable.c    6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 38f7879df0d8..b41a5c09832a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -217,15 +217,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                                size_t nbuckets)
 {
-        struct bucket_table *tbl;
+        struct bucket_table *tbl = NULL;
         size_t size;
         int i;
 
         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-        tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+                tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
         if (tbl == NULL)
                 tbl = vzalloc(size);
-
         if (tbl == NULL)
                 return NULL;