author		Gerald Schaefer <gerald.schaefer@linux.ibm.com>	2024-09-02 14:02:19 +0200
committer	Heiko Carstens <hca@linux.ibm.com>	2024-09-05 15:17:23 +0200
commit		131b8db78558120f58c5dc745ea9655f6b854162 (patch)
tree		8883d6c5152c82831515d3e58ae522bb09cf5772 /arch/s390
parent		0114009953c119021870c42c778b251440a93844 (diff)
s390/mm: Add cond_resched() to cmm_alloc/free_pages()
Adding/removing a large number of pages at once to/from the CMM balloon can result in rcu_sched stalls or workqueue lockups, because of busy looping without cond_resched(). Prevent this by adding a cond_resched().

cmm_free_pages() holds a spin_lock while looping, so the cond_resched() cannot be added directly to the existing loop. Instead, introduce a wrapper function that operates on at most 256 pages at once, and add it there.

Signed-off-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
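The fix is an instance of a common kernel idiom: split a long-running loop into bounded chunks and call cond_resched() between chunks so the scheduler can run other work. The sketch below only illustrates that idiom and is not the patch itself; process_items(), process_one_item() and CHUNK are made-up names for the example, while cond_resched() and min() are the real kernel interfaces the patch uses.

#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/sched.h>

#define CHUNK	256L	/* arbitrary chunk size, mirrors the 256 pages used by the patch */

/* Hypothetical per-item work; stands in for allocating or freeing one page. */
static void process_one_item(void)
{
}

/* Process nr items, yielding the CPU after every CHUNK items. */
static long process_items(long nr)
{
	long chunk;

	while (nr) {
		chunk = min(CHUNK, nr);
		nr -= chunk;
		while (chunk--)
			process_one_item();
		cond_resched();	/* prevents rcu_sched stalls / workqueue lockups */
	}
	return nr;
}

The wrapper introduced by the patch follows the same shape, with the extra detail that __cmm_free_pages() is the chunked worker and a non-zero return value (pages that could not be freed) terminates the loop early.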
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/mm/cmm.c	18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 75d15bf41d97..d01724a715d0 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
 		(*counter)++;
 		spin_unlock(&cmm_lock);
 		nr--;
+		cond_resched();
 	}
 	return nr;
 }
 
-static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
 {
 	struct cmm_page_array *pa;
 	unsigned long addr;
@@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
 	return nr;
 }
 
+static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+{
+	long inc = 0;
+
+	while (nr) {
+		inc = min(256L, nr);
+		nr -= inc;
+		inc = __cmm_free_pages(inc, counter, list);
+		if (inc)
+			break;
+		cond_resched();
+	}
+	return nr + inc;
+}
+
 static int cmm_oom_notify(struct notifier_block *self,
 			  unsigned long dummy, void *parm)
 {