summaryrefslogtreecommitdiff
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 10:32:08 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 10:32:08 -0700
commit7de6b4a246330fe29fa2fd144b4724ca35d60d6c (patch)
treee9ef69537787ded9bf98dba3e31ca1d50a829889 /kernel/workqueue.c
parentb71f0be2d23d876648758d57bc6761500e3b9c70 (diff)
parent76af54648899abbd6b449c035583e47fd407078a (diff)
downloadlwn-7de6b4a246330fe29fa2fd144b4724ca35d60d6c.tar.gz
lwn-7de6b4a246330fe29fa2fd144b4724ca35d60d6c.zip
Merge tag 'wq-for-7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo: - New default WQ_AFFN_CACHE_SHARD affinity scope subdivides LLCs into smaller shards to improve scalability on machines with many CPUs per LLC - Misc: - system_dfl_long_wq for long unbound works - devm_alloc_workqueue() for device-managed allocation - sysfs exposure for ordered workqueues and the EFI workqueue - removal of HK_TYPE_WQ from wq_unbound_cpumask - various small fixes * tag 'wq-for-7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (21 commits) workqueue: validate cpumask_first() result in llc_populate_cpu_shard_id() workqueue: use NR_STD_WORKER_POOLS instead of hardcoded value workqueue: avoid unguarded 64-bit division docs: workqueue: document WQ_AFFN_CACHE_SHARD affinity scope workqueue: add test_workqueue benchmark module tools/workqueue: add CACHE_SHARD support to wq_dump.py workqueue: set WQ_AFFN_CACHE_SHARD as the default affinity scope workqueue: add WQ_AFFN_CACHE_SHARD affinity scope workqueue: fix typo in WQ_AFFN_SMT comment workqueue: Remove HK_TYPE_WQ from affecting wq_unbound_cpumask workqueue: unlink pwqs from wq->pwqs list in alloc_and_link_pwqs() error path workqueue: Remove NULL wq WARN in __queue_delayed_work() workqueue: fix parse_affn_scope() prefix matching bug workqueue: devres: Add device-managed allocate workqueue workqueue: Add system_dfl_long_wq for long unbound works tools/workqueue/wq_dump.py: add NODE prefix to all node columns tools/workqueue/wq_dump.py: fix column alignment in node_nr/max_active section tools/workqueue/wq_dump.py: remove backslash separator from node_nr/max_active header efi: Allow to expose the workqueue via sysfs workqueue: Allow to expose ordered workqueues via sysfs ...
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c285
1 file changed, 261 insertions, 24 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c6ea96d5b716..5f747f241a5f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
+#include <linux/device/devres.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
@@ -130,6 +131,14 @@ enum wq_internal_consts {
WORKER_ID_LEN = 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
};
/*
 * Layout of shards within one LLC pod, as computed by
 * llc_calc_shard_layout().  Cores are spread as evenly as possible:
 * the first nr_large_shards shards hold (cores_per_shard + 1) cores and
 * the remaining (nr_shards - nr_large_shards) "default" shards hold
 * cores_per_shard cores each.
 */
struct llc_shard_layout {
	int nr_large_shards;	/* shards holding (cores_per_shard + 1) cores */
	int cores_per_shard;	/* base number of cores per default shard */
	int nr_shards;		/* total number of shards in the pod */
};
+
/*
* We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
* MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because
@@ -404,11 +413,12 @@ struct work_offq_data {
u32 flags;
};
-static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
+static const char * const wq_affn_names[WQ_AFFN_NR_TYPES] = {
[WQ_AFFN_DFL] = "default",
[WQ_AFFN_CPU] = "cpu",
[WQ_AFFN_SMT] = "smt",
[WQ_AFFN_CACHE] = "cache",
+ [WQ_AFFN_CACHE_SHARD] = "cache_shard",
[WQ_AFFN_NUMA] = "numa",
[WQ_AFFN_SYSTEM] = "system",
};
@@ -431,13 +441,16 @@ module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
/*
 * Target number of cores per WQ_AFFN_CACHE_SHARD shard.  Used by
 * llc_calc_shard_layout() to decide how many shards each LLC pod is
 * split into.  Read-only after boot (perms 0444).
 */
static unsigned int wq_cache_shard_size = 8;
module_param_named(cache_shard_size, wq_cache_shard_size, uint, 0444);
+
static bool wq_online; /* can kworkers be created yet? */
static bool wq_topo_initialized __read_mostly = false;
static struct kmem_cache *pwq_cache;
static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
-static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
+static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE_SHARD;
/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
@@ -530,6 +543,8 @@ struct workqueue_struct *system_bh_wq;
EXPORT_SYMBOL_GPL(system_bh_wq);
struct workqueue_struct *system_bh_highpri_wq;
EXPORT_SYMBOL_GPL(system_bh_highpri_wq);
+struct workqueue_struct *system_dfl_long_wq __ro_after_init;
+EXPORT_SYMBOL_GPL(system_dfl_long_wq);
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
@@ -2519,7 +2534,6 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
- WARN_ON_ONCE(!wq);
WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
WARN_ON_ONCE(timer_pending(timer));
WARN_ON_ONCE(!list_empty(&work->entry));
@@ -5635,8 +5649,16 @@ enomem:
for_each_possible_cpu(cpu) {
struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
- if (pwq)
+ if (pwq) {
+ /*
+ * Unlink pwq from wq->pwqs since link_pwq()
+ * may have already added it. wq->mutex is not
+ * needed as the wq has not been published yet.
+ */
+ if (!list_empty(&pwq->pwqs_node))
+ list_del_rcu(&pwq->pwqs_node);
kmem_cache_free(pwq_cache, pwq);
+ }
}
free_percpu(wq->cpu_pwq);
wq->cpu_pwq = NULL;
@@ -5904,6 +5926,33 @@ struct workqueue_struct *alloc_workqueue_noprof(const char *fmt,
}
EXPORT_SYMBOL_GPL(alloc_workqueue_noprof);
+static void devm_workqueue_release(void *res)
+{
+ destroy_workqueue(res);
+}
+
+__printf(2, 5) struct workqueue_struct *
+devm_alloc_workqueue(struct device *dev, const char *fmt, unsigned int flags,
+ int max_active, ...)
+{
+ struct workqueue_struct *wq;
+ va_list args;
+ int ret;
+
+ va_start(args, max_active);
+ wq = alloc_workqueue(fmt, flags, max_active, args);
+ va_end(args);
+ if (!wq)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, devm_workqueue_release, wq);
+ if (ret)
+ return NULL;
+
+ return wq;
+}
+EXPORT_SYMBOL_GPL(devm_alloc_workqueue);
+
#ifdef CONFIG_LOCKDEP
__printf(1, 5)
struct workqueue_struct *
@@ -7059,7 +7108,7 @@ int workqueue_unbound_housekeeping_update(const struct cpumask *hk)
/*
* If the operation fails, it will fall back to
* wq_requested_unbound_cpumask which is initially set to
- * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) house keeping mask and rewritten
+ * HK_TYPE_DOMAIN house keeping mask and rewritten
* by any subsequent write to workqueue/cpumask sysfs file.
*/
if (!cpumask_and(cpumask, wq_requested_unbound_cpumask, hk))
@@ -7078,13 +7127,7 @@ int workqueue_unbound_housekeeping_update(const struct cpumask *hk)
static int parse_affn_scope(const char *val)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
- if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
- return i;
- }
- return -EINVAL;
+ return sysfs_match_string(wq_affn_names, val);
}
static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
@@ -7191,7 +7234,26 @@ static struct attribute *wq_sysfs_attrs[] = {
&dev_attr_max_active.attr,
NULL,
};
-ATTRIBUTE_GROUPS(wq_sysfs);
+
+static umode_t wq_sysfs_is_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct workqueue_struct *wq = dev_to_wq(dev);
+
+ /*
+ * Adjusting max_active breaks ordering guarantee. Changing it has no
+ * effect on BH worker. Limit max_active to RO in such case.
+ */
+ if (wq->flags & (WQ_BH | __WQ_ORDERED))
+ return 0444;
+ return a->mode;
+}
+
+static const struct attribute_group wq_sysfs_group = {
+ .is_visible = wq_sysfs_is_visible,
+ .attrs = wq_sysfs_attrs,
+};
+__ATTRIBUTE_GROUPS(wq_sysfs);
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -7494,13 +7556,6 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
struct wq_device *wq_dev;
int ret;
- /*
- * Adjusting max_active breaks ordering guarantee. Disallow exposing
- * ordered workqueues.
- */
- if (WARN_ON(wq->flags & __WQ_ORDERED))
- return -EINVAL;
-
wq->wq_dev = wq_dev = kzalloc_obj(*wq_dev);
if (!wq_dev)
return -ENOMEM;
@@ -7877,8 +7932,8 @@ void __init workqueue_init_early(void)
{
struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
- void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal,
- bh_pool_kick_highpri };
+ void (*irq_work_fns[NR_STD_WORKER_POOLS])(struct irq_work *) =
+ { bh_pool_kick_normal, bh_pool_kick_highpri };
int i, cpu;
BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
@@ -7890,7 +7945,6 @@ void __init workqueue_init_early(void)
cpumask_copy(wq_online_cpumask, cpu_online_mask);
cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
- restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
if (!cpumask_empty(&wq_cmdline_cpumask))
restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
@@ -7974,11 +8028,12 @@ void __init workqueue_init_early(void)
system_bh_wq = alloc_workqueue("events_bh", WQ_BH | WQ_PERCPU, 0);
system_bh_highpri_wq = alloc_workqueue("events_bh_highpri",
WQ_BH | WQ_HIGHPRI | WQ_PERCPU, 0);
+ system_dfl_long_wq = alloc_workqueue("events_dfl_long", WQ_UNBOUND, WQ_MAX_ACTIVE);
BUG_ON(!system_wq || !system_percpu_wq|| !system_highpri_wq || !system_long_wq ||
!system_unbound_wq || !system_freezable_wq || !system_dfl_wq ||
!system_power_efficient_wq ||
!system_freezable_power_efficient_wq ||
- !system_bh_wq || !system_bh_highpri_wq);
+ !system_bh_wq || !system_bh_highpri_wq || !system_dfl_long_wq);
}
static void __init wq_cpu_intensive_thresh_init(void)
@@ -8144,6 +8199,186 @@ static bool __init cpus_share_numa(int cpu0, int cpu1)
return cpu_to_node(cpu0) == cpu_to_node(cpu1);
}
/*
 * Maps each CPU to its shard index within the LLC pod it belongs to.
 * Filled by precompute_cache_shard_ids(); marked __initdata, so it is
 * only valid during boot while pod types are being initialized.
 */
static int cpu_shard_id[NR_CPUS] __initdata;
+
+/**
+ * llc_count_cores - count distinct cores (SMT groups) within an LLC pod
+ * @pod_cpus: the cpumask of CPUs in the LLC pod
+ * @smt_pods: the SMT pod type, used to identify sibling groups
+ *
+ * A core is represented by the lowest-numbered CPU in its SMT group. Returns
+ * the number of distinct cores found in @pod_cpus.
+ */
+static int __init llc_count_cores(const struct cpumask *pod_cpus,
+ struct wq_pod_type *smt_pods)
+{
+ const struct cpumask *sibling_cpus;
+ int nr_cores = 0, c;
+
+ /*
+ * Count distinct cores by only counting the first CPU in each
+ * SMT sibling group.
+ */
+ for_each_cpu(c, pod_cpus) {
+ sibling_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]];
+ if (cpumask_first(sibling_cpus) == c)
+ nr_cores++;
+ }
+
+ return nr_cores;
+}
+
+/*
+ * llc_shard_size - number of cores in a given shard
+ *
+ * Cores are spread as evenly as possible. The first @nr_large_shards shards are
+ * "large shards" with (cores_per_shard + 1) cores; the rest are "default
+ * shards" with cores_per_shard cores.
+ */
+static int __init llc_shard_size(int shard_id, int cores_per_shard, int nr_large_shards)
+{
+ /* The first @nr_large_shards shards are large shards */
+ if (shard_id < nr_large_shards)
+ return cores_per_shard + 1;
+
+ /* The remaining shards are default shards */
+ return cores_per_shard;
+}
+
+/*
+ * llc_calc_shard_layout - compute the shard layout for an LLC pod
+ * @nr_cores: number of distinct cores in the LLC pod
+ *
+ * Chooses the number of shards that keeps average shard size closest to
+ * wq_cache_shard_size. Returns a struct describing the total number of shards,
+ * the base size of each, and how many are large shards.
+ */
+static struct llc_shard_layout __init llc_calc_shard_layout(int nr_cores)
+{
+ struct llc_shard_layout layout;
+
+ /* Ensure at least one shard; pick the count closest to the target size */
+ layout.nr_shards = max(1, DIV_ROUND_CLOSEST(nr_cores, wq_cache_shard_size));
+ layout.cores_per_shard = nr_cores / layout.nr_shards;
+ layout.nr_large_shards = nr_cores % layout.nr_shards;
+
+ return layout;
+}
+
+/*
+ * llc_shard_is_full - check whether a shard has reached its core capacity
+ * @cores_in_shard: number of cores already assigned to this shard
+ * @shard_id: index of the shard being checked
+ * @layout: the shard layout computed by llc_calc_shard_layout()
+ *
+ * Returns true if @cores_in_shard equals the expected size for @shard_id.
+ */
+static bool __init llc_shard_is_full(int cores_in_shard, int shard_id,
+ const struct llc_shard_layout *layout)
+{
+ return cores_in_shard == llc_shard_size(shard_id, layout->cores_per_shard,
+ layout->nr_large_shards);
+}
+
/**
 * llc_populate_cpu_shard_id - populate cpu_shard_id[] for each CPU in an LLC pod
 * @pod_cpus: the cpumask of CPUs in the LLC pod
 * @smt_pods: the SMT pod type, used to identify sibling groups
 * @nr_cores: number of distinct cores in @pod_cpus (from llc_count_cores())
 *
 * Walks @pod_cpus in ascending CPU order.  Each SMT group leader (the
 * lowest-numbered CPU of its sibling mask) is packed into the current
 * shard, advancing to the next shard once it is full; non-leader CPUs
 * simply inherit their leader's shard so a core is never split across
 * shards.  Results are written to cpu_shard_id[].
 */
static void __init llc_populate_cpu_shard_id(const struct cpumask *pod_cpus,
					     struct wq_pod_type *smt_pods,
					     int nr_cores)
{
	struct llc_shard_layout layout = llc_calc_shard_layout(nr_cores);
	const struct cpumask *sibling_cpus;
	/* Number of cores assigned to the shard currently being filled */
	int cores_in_shard = 0;
	unsigned int leader;
	/* Cursor over the shards; runs from 0 to layout.nr_shards - 1 */
	int shard_id = 0;
	int c;

	/* Visit every CPU of the LLC pod and assign it a shard */
	for_each_cpu(c, pod_cpus) {
		sibling_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]];
		if (cpumask_first(sibling_cpus) == c) {
			/*
			 * @c leads its SMT group.  Open a fresh shard first
			 * if the current one is already at capacity.
			 */
			if (llc_shard_is_full(cores_in_shard, shard_id, &layout)) {
				shard_id++;
				cores_in_shard = 0;
			}
			cores_in_shard++;
			cpu_shard_id[c] = shard_id;
		} else {
			/*
			 * SMT siblings MUST share their leader's shard -
			 * never split threads of the same core.
			 */
			leader = cpumask_first(sibling_cpus);

			/*
			 * This check silences a Warray-bounds warning on UP
			 * configs where NR_CPUS=1 makes cpu_shard_id[] a
			 * single-element array and the compiler can't prove
			 * the index is always 0.
			 */
			if (WARN_ON_ONCE(leader >= nr_cpu_ids))
				continue;
			cpu_shard_id[c] = cpu_shard_id[leader];
		}
	}

	/* Every shard in the computed layout must have been consumed */
	WARN_ON_ONCE(shard_id != (layout.nr_shards - 1));
}
+
+/**
+ * precompute_cache_shard_ids - assign each CPU its shard index within its LLC
+ *
+ * Iterates over all LLC pods. For each pod, counts distinct cores then assigns
+ * shard indices to all CPUs in the pod. Must be called after WQ_AFFN_CACHE and
+ * WQ_AFFN_SMT have been initialized.
+ */
+static void __init precompute_cache_shard_ids(void)
+{
+ struct wq_pod_type *llc_pods = &wq_pod_types[WQ_AFFN_CACHE];
+ struct wq_pod_type *smt_pods = &wq_pod_types[WQ_AFFN_SMT];
+ const struct cpumask *cpus_sharing_llc;
+ int nr_cores;
+ int pod;
+
+ if (!wq_cache_shard_size) {
+ pr_warn("workqueue: cache_shard_size must be > 0, setting to 1\n");
+ wq_cache_shard_size = 1;
+ }
+
+ for (pod = 0; pod < llc_pods->nr_pods; pod++) {
+ cpus_sharing_llc = llc_pods->pod_cpus[pod];
+
+ /* Number of cores in this given LLC */
+ nr_cores = llc_count_cores(cpus_sharing_llc, smt_pods);
+ llc_populate_cpu_shard_id(cpus_sharing_llc, smt_pods, nr_cores);
+ }
+}
+
+/*
+ * cpus_share_cache_shard - test whether two CPUs belong to the same cache shard
+ *
+ * Two CPUs share a cache shard if they are in the same LLC and have the same
+ * shard index. Used as the pod affinity callback for WQ_AFFN_CACHE_SHARD.
+ */
+static bool __init cpus_share_cache_shard(int cpu0, int cpu1)
+{
+ if (!cpus_share_cache(cpu0, cpu1))
+ return false;
+
+ return cpu_shard_id[cpu0] == cpu_shard_id[cpu1];
+}
+
/**
* workqueue_init_topology - initialize CPU pods for unbound workqueues
*
@@ -8159,6 +8394,8 @@ void __init workqueue_init_topology(void)
init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
+ precompute_cache_shard_ids();
+ init_pod_type(&wq_pod_types[WQ_AFFN_CACHE_SHARD], cpus_share_cache_shard);
init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
wq_topo_initialized = true;