summary | refs | log | tree | commit | diff
path: root/arch/riscv/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/hibernate.c              | 2
-rw-r--r--  arch/riscv/kernel/module.c                 | 6
-rw-r--r--  arch/riscv/kernel/tests/kprobes/test-kprobes.c | 2
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c | 2
4 files changed, 6 insertions, 6 deletions
diff --git a/arch/riscv/kernel/hibernate.c b/arch/riscv/kernel/hibernate.c
index 0e31c02cb554..982843828adb 100644
--- a/arch/riscv/kernel/hibernate.c
+++ b/arch/riscv/kernel/hibernate.c
@@ -415,7 +415,7 @@ int hibernate_resume_nonboot_cpu_disable(void)
static int __init riscv_hibernate_init(void)
{
- hibernate_cpu_context = kzalloc_obj(*hibernate_cpu_context, GFP_KERNEL);
+ hibernate_cpu_context = kzalloc_obj(*hibernate_cpu_context);
if (WARN_ON(!hibernate_cpu_context))
return -ENOMEM;
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index addc7dac2424..cc2f980700f3 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -663,7 +663,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
struct used_bucket *bucket;
unsigned long hash;
- entry = kmalloc_obj(*entry, GFP_KERNEL);
+ entry = kmalloc_obj(*entry);
if (!entry)
return -ENOMEM;
@@ -697,7 +697,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
* relocation_entry.
*/
if (!found) {
- rel_head = kmalloc_obj(*rel_head, GFP_KERNEL);
+ rel_head = kmalloc_obj(*rel_head);
if (!rel_head) {
kfree(entry);
@@ -709,7 +709,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
INIT_HLIST_NODE(&rel_head->node);
if (!current_head->first) {
bucket =
- kmalloc_obj(struct used_bucket, GFP_KERNEL);
+ kmalloc_obj(struct used_bucket);
if (!bucket) {
kfree(entry);
diff --git a/arch/riscv/kernel/tests/kprobes/test-kprobes.c b/arch/riscv/kernel/tests/kprobes/test-kprobes.c
index c0526c0c7527..027424a3ff7b 100644
--- a/arch/riscv/kernel/tests/kprobes/test-kprobes.c
+++ b/arch/riscv/kernel/tests/kprobes/test-kprobes.c
@@ -20,7 +20,7 @@ static void test_kprobe_riscv(struct kunit *test)
while (test_kprobes_addresses[num_kprobe])
num_kprobe++;
- kp = kzalloc_objs(*kp, num_kprobe, GFP_KERNEL);
+ kp = kzalloc_objs(*kp, num_kprobe);
KUNIT_EXPECT_TRUE(test, kp);
if (!kp)
return;
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 63ed6e6b24eb..b36a6a56f404 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -139,7 +139,7 @@ static void __init check_unaligned_access_speed_all_cpus(void)
{
unsigned int cpu;
unsigned int cpu_count = num_possible_cpus();
- struct page **bufs = kzalloc_objs(*bufs, cpu_count, GFP_KERNEL);
+ struct page **bufs = kzalloc_objs(*bufs, cpu_count);
if (!bufs) {
pr_warn("Allocation failure, not measuring misaligned performance\n");