Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  379
1 file changed, 324 insertions, 55 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index aeaec79bc09c..5f747f241a5f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -41,6 +41,7 @@ #include <linux/mempolicy.h> #include <linux/freezer.h> #include <linux/debug_locks.h> +#include <linux/device/devres.h> #include <linux/lockdep.h> #include <linux/idr.h> #include <linux/jhash.h> @@ -130,6 +131,14 @@ enum wq_internal_consts { WORKER_ID_LEN = 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */ }; +/* Layout of shards within one LLC pod */ +struct llc_shard_layout { + int nr_large_shards; /* number of large shards (cores_per_shard + 1) */ + int cores_per_shard; /* base number of cores per default shard */ + int nr_shards; /* total number of shards */ + /* nr_default shards = (nr_shards - nr_large_shards) */ +}; + /* * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because @@ -190,7 +199,7 @@ struct worker_pool { int id; /* I: pool ID */ unsigned int flags; /* L: flags */ - unsigned long watchdog_ts; /* L: watchdog timestamp */ + unsigned long last_progress_ts; /* L: last forward progress timestamp */ bool cpu_stall; /* WD: stalled cpu bound pool */ /* @@ -404,11 +413,12 @@ struct work_offq_data { u32 flags; }; -static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { +static const char * const wq_affn_names[WQ_AFFN_NR_TYPES] = { [WQ_AFFN_DFL] = "default", [WQ_AFFN_CPU] = "cpu", [WQ_AFFN_SMT] = "smt", [WQ_AFFN_CACHE] = "cache", + [WQ_AFFN_CACHE_SHARD] = "cache_shard", [WQ_AFFN_NUMA] = "numa", [WQ_AFFN_SYSTEM] = "system", }; @@ -431,13 +441,16 @@ module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); module_param_named(power_efficient, wq_power_efficient, bool, 0444); +static unsigned int wq_cache_shard_size = 8; +module_param_named(cache_shard_size, wq_cache_shard_size, uint, 0444); + static bool wq_online; /* can kworkers be created yet? 
*/ static bool wq_topo_initialized __read_mostly = false; static struct kmem_cache *pwq_cache; static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; -static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; +static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE_SHARD; /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */ static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf; @@ -530,6 +543,8 @@ struct workqueue_struct *system_bh_wq; EXPORT_SYMBOL_GPL(system_bh_wq); struct workqueue_struct *system_bh_highpri_wq; EXPORT_SYMBOL_GPL(system_bh_highpri_wq); +struct workqueue_struct *system_dfl_long_wq __ro_after_init; +EXPORT_SYMBOL_GPL(system_dfl_long_wq); static int worker_thread(void *__worker); static void workqueue_sysfs_unregister(struct workqueue_struct *wq); @@ -1697,7 +1712,7 @@ static void __pwq_activate_work(struct pool_workqueue *pwq, WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); trace_workqueue_activate_work(work); if (list_empty(&pwq->pool->worklist)) - pwq->pool->watchdog_ts = jiffies; + pwq->pool->last_progress_ts = jiffies; move_linked_works(work, &pwq->pool->worklist, NULL); __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); } @@ -1849,8 +1864,20 @@ static void unplug_oldest_pwq(struct workqueue_struct *wq) raw_spin_lock_irq(&pwq->pool->lock); if (pwq->plugged) { pwq->plugged = false; - if (pwq_activate_first_inactive(pwq, true)) + if (pwq_activate_first_inactive(pwq, true)) { + /* + * While plugged, queueing skips activation which + * includes bumping the nr_active count and adding the + * pwq to nna->pending_pwqs if the count can't be + * obtained. We need to restore both for the pwq being + * unplugged. The first call activates the first + * inactive work item and the second, if there are more + * inactive, puts the pwq on pending_pwqs. 
+ */ + pwq_activate_first_inactive(pwq, false); + kick_pool(pwq->pool); + } } raw_spin_unlock_irq(&pwq->pool->lock); } @@ -2348,7 +2375,7 @@ retry: */ if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) { if (list_empty(&pool->worklist)) - pool->watchdog_ts = jiffies; + pool->last_progress_ts = jiffies; trace_workqueue_activate_work(work); insert_work(pwq, work, &pool->worklist, work_flags); @@ -2507,7 +2534,6 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, struct timer_list *timer = &dwork->timer; struct work_struct *work = &dwork->work; - WARN_ON_ONCE(!wq); WARN_ON_ONCE(timer->function != delayed_work_timer_fn); WARN_ON_ONCE(timer_pending(timer)); WARN_ON_ONCE(!list_empty(&work->entry)); @@ -3204,6 +3230,7 @@ __acquires(&pool->lock) worker->current_pwq = pwq; if (worker->task) worker->current_at = worker->task->se.sum_exec_runtime; + worker->current_start = jiffies; work_data = *work_data_bits(work); worker->current_color = get_work_color(work_data); @@ -3352,7 +3379,7 @@ static void process_scheduled_works(struct worker *worker) while ((work = list_first_entry_or_null(&worker->scheduled, struct work_struct, entry))) { if (first) { - worker->pool->watchdog_ts = jiffies; + worker->pool->last_progress_ts = jiffies; first = false; } process_one_work(worker, work); @@ -4850,7 +4877,7 @@ static int init_worker_pool(struct worker_pool *pool) pool->cpu = -1; pool->node = NUMA_NO_NODE; pool->flags |= POOL_DISASSOCIATED; - pool->watchdog_ts = jiffies; + pool->last_progress_ts = jiffies; INIT_LIST_HEAD(&pool->worklist); INIT_LIST_HEAD(&pool->idle_list); hash_init(pool->busy_hash); @@ -5622,8 +5649,16 @@ enomem: for_each_possible_cpu(cpu) { struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); - if (pwq) + if (pwq) { + /* + * Unlink pwq from wq->pwqs since link_pwq() + * may have already added it. wq->mutex is not + * needed as the wq has not been published yet. + */ + if (!list_empty(&pwq->pwqs_node)) + list_del_rcu(&pwq->pwqs_node); kmem_cache_free(pwq_cache, pwq); + } } free_percpu(wq->cpu_pwq); wq->cpu_pwq = NULL; @@ -5891,6 +5926,33 @@ struct workqueue_struct *alloc_workqueue_noprof(const char *fmt, } EXPORT_SYMBOL_GPL(alloc_workqueue_noprof); +static void devm_workqueue_release(void *res) +{ + destroy_workqueue(res); +} + +__printf(2, 5) struct workqueue_struct * +devm_alloc_workqueue(struct device *dev, const char *fmt, unsigned int flags, + int max_active, ...) +{ + struct workqueue_struct *wq; + va_list args; + int ret; + + va_start(args, max_active); + wq = alloc_workqueue(fmt, flags, max_active, args); + va_end(args); + if (!wq) + return NULL; + + ret = devm_add_action_or_reset(dev, devm_workqueue_release, wq); + if (ret) + return NULL; + + return wq; +} +EXPORT_SYMBOL_GPL(devm_alloc_workqueue); + #ifdef CONFIG_LOCKDEP __printf(1, 5) struct workqueue_struct * @@ -6274,7 +6336,7 @@ static void pr_cont_worker_id(struct worker *worker) { struct worker_pool *pool = worker->pool; - if (pool->flags & WQ_BH) + if (pool->flags & POOL_BH) pr_cont("bh%s", pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); else @@ -6359,6 +6421,8 @@ static void show_pwq(struct pool_workqueue *pwq) pr_cont(" %s", comma ? 
"," : ""); pr_cont_worker_id(worker); pr_cont(":%ps", worker->current_func); + pr_cont(" for %us", + jiffies_to_msecs(jiffies - worker->current_start) / 1000); list_for_each_entry(work, &worker->scheduled, entry) pr_cont_work(false, work, &pcws); pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); @@ -6462,7 +6526,7 @@ static void show_one_worker_pool(struct worker_pool *pool) /* How long the first pending work is waiting for a worker. */ if (!list_empty(&pool->worklist)) - hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; + hung = jiffies_to_msecs(jiffies - pool->last_progress_ts) / 1000; /* * Defer printing to avoid deadlocks in console drivers that @@ -7044,7 +7108,7 @@ int workqueue_unbound_housekeeping_update(const struct cpumask *hk) /* * If the operation fails, it will fall back to * wq_requested_unbound_cpumask which is initially set to - * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) house keeping mask and rewritten + * HK_TYPE_DOMAIN house keeping mask and rewritten * by any subsequent write to workqueue/cpumask sysfs file. */ if (!cpumask_and(cpumask, wq_requested_unbound_cpumask, hk)) @@ -7063,13 +7127,7 @@ int workqueue_unbound_housekeeping_update(const struct cpumask *hk) static int parse_affn_scope(const char *val) { - int i; - - for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { - if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) - return i; - } - return -EINVAL; + return sysfs_match_string(wq_affn_names, val); } static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) @@ -7176,7 +7234,26 @@ static struct attribute *wq_sysfs_attrs[] = { &dev_attr_max_active.attr, NULL, }; -ATTRIBUTE_GROUPS(wq_sysfs); + +static umode_t wq_sysfs_is_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct workqueue_struct *wq = dev_to_wq(dev); + + /* + * Adjusting max_active breaks ordering guarantee. Changing it has no + * effect on BH worker. Limit max_active to RO in such case. + */ + if (wq->flags & (WQ_BH | __WQ_ORDERED)) + return 0444; + return a->mode; +} + +static const struct attribute_group wq_sysfs_group = { + .is_visible = wq_sysfs_is_visible, + .attrs = wq_sysfs_attrs, +}; +__ATTRIBUTE_GROUPS(wq_sysfs); static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -7479,13 +7556,6 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) struct wq_device *wq_dev; int ret; - /* - * Adjusting max_active breaks ordering guarantee. Disallow exposing - * ordered workqueues. - */ - if (WARN_ON(wq->flags & __WQ_ORDERED)) - return -EINVAL; - wq->wq_dev = wq_dev = kzalloc_obj(*wq_dev); if (!wq_dev) return -ENOMEM; @@ -7580,11 +7650,11 @@ MODULE_PARM_DESC(panic_on_stall_time, "Panic if stall exceeds this many seconds /* * Show workers that might prevent the processing of pending work items. - * The only candidates are CPU-bound workers in the running state. - * Pending work items should be handled by another idle worker - * in all other situations. + * A busy worker that is not running on the CPU (e.g. sleeping in + * wait_event_idle() with PF_WQ_WORKER cleared) can stall the pool just as + * effectively as a CPU-bound one, so dump every in-flight worker. 
*/ -static void show_cpu_pool_hog(struct worker_pool *pool) +static void show_cpu_pool_busy_workers(struct worker_pool *pool) { struct worker *worker; unsigned long irq_flags; @@ -7593,36 +7663,34 @@ static void show_cpu_pool_hog(struct worker_pool *pool) raw_spin_lock_irqsave(&pool->lock, irq_flags); hash_for_each(pool->busy_hash, bkt, worker, hentry) { - if (task_is_running(worker->task)) { - /* - * Defer printing to avoid deadlocks in console - * drivers that queue work while holding locks - * also taken in their write paths. - */ - printk_deferred_enter(); + /* + * Defer printing to avoid deadlocks in console + * drivers that queue work while holding locks + * also taken in their write paths. + */ + printk_deferred_enter(); - pr_info("pool %d:\n", pool->id); - sched_show_task(worker->task); + pr_info("pool %d:\n", pool->id); + sched_show_task(worker->task); - printk_deferred_exit(); - } + printk_deferred_exit(); } raw_spin_unlock_irqrestore(&pool->lock, irq_flags); } -static void show_cpu_pools_hogs(void) +static void show_cpu_pools_busy_workers(void) { struct worker_pool *pool; int pi; - pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); + pr_info("Showing backtraces of busy workers in stalled worker pools:\n"); rcu_read_lock(); for_each_pool(pool, pi) { if (pool->cpu_stall) - show_cpu_pool_hog(pool); + show_cpu_pool_busy_workers(pool); } @@ -7691,15 +7759,36 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); else touched = READ_ONCE(wq_watchdog_touched); - pool_ts = READ_ONCE(pool->watchdog_ts); + pool_ts = READ_ONCE(pool->last_progress_ts); if (time_after(pool_ts, touched)) ts = pool_ts; else ts = touched; - /* did we stall? */ + /* + * Did we stall? + * + * Do a lockless check first to do not disturb the system. + * + * Prevent false positives by double checking the timestamp + * under pool->lock. The lock makes sure that the check reads + * an updated pool->last_progress_ts when this CPU saw + * an already updated pool->worklist above. It seems better + * than adding another barrier into __queue_work() which + * is a hotter path. 
+ */ if (time_after(now, ts + thresh)) { + scoped_guard(raw_spinlock_irqsave, &pool->lock) { + pool_ts = pool->last_progress_ts; + if (time_after(pool_ts, touched)) + ts = pool_ts; + else + ts = touched; + } + if (!time_after(now, ts + thresh)) + continue; + lockup_detected = true; stall_time = jiffies_to_msecs(now - pool_ts) / 1000; max_stall_time = max(max_stall_time, stall_time); @@ -7711,15 +7800,13 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) pr_cont_pool_info(pool); pr_cont(" stuck for %us!\n", stall_time); } - - } if (lockup_detected) show_all_workqueues(); if (cpu_pool_stall) - show_cpu_pools_hogs(); + show_cpu_pools_busy_workers(); if (lockup_detected) panic_on_wq_watchdog(max_stall_time); @@ -7845,8 +7932,8 @@ void __init workqueue_init_early(void) { struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; - void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal, - bh_pool_kick_highpri }; + void (*irq_work_fns[NR_STD_WORKER_POOLS])(struct irq_work *) = + { bh_pool_kick_normal, bh_pool_kick_highpri }; int i, cpu; BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); @@ -7858,7 +7945,6 @@ void __init workqueue_init_early(void) cpumask_copy(wq_online_cpumask, cpu_online_mask); cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); - restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ)); restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN)); if (!cpumask_empty(&wq_cmdline_cpumask)) restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); @@ -7942,11 +8028,12 @@ void __init workqueue_init_early(void) system_bh_wq = alloc_workqueue("events_bh", WQ_BH | WQ_PERCPU, 0); system_bh_highpri_wq = alloc_workqueue("events_bh_highpri", WQ_BH | WQ_HIGHPRI | WQ_PERCPU, 0); + system_dfl_long_wq = alloc_workqueue("events_dfl_long", WQ_UNBOUND, WQ_MAX_ACTIVE); BUG_ON(!system_wq || !system_percpu_wq|| !system_highpri_wq || !system_long_wq || !system_unbound_wq || !system_freezable_wq || !system_dfl_wq || !system_power_efficient_wq || !system_freezable_power_efficient_wq || - !system_bh_wq || !system_bh_highpri_wq); + !system_bh_wq || !system_bh_highpri_wq || !system_dfl_long_wq); } static void __init wq_cpu_intensive_thresh_init(void) @@ -8112,6 +8199,186 @@ static bool __init cpus_share_numa(int cpu0, int cpu1) return cpu_to_node(cpu0) == cpu_to_node(cpu1); } +/* Maps each CPU to its shard index within the LLC pod it belongs to */ +static int cpu_shard_id[NR_CPUS] __initdata; + +/** + * llc_count_cores - count distinct cores (SMT groups) within an LLC pod + * @pod_cpus: the cpumask of CPUs in the LLC pod + * @smt_pods: the SMT pod type, used to identify sibling groups + * + * A core is represented by the lowest-numbered CPU in its SMT group. Returns + * the number of distinct cores found in @pod_cpus. + */ +static int __init llc_count_cores(const struct cpumask *pod_cpus, + struct wq_pod_type *smt_pods) +{ + const struct cpumask *sibling_cpus; + int nr_cores = 0, c; + + /* + * Count distinct cores by only counting the first CPU in each + * SMT sibling group. + */ + for_each_cpu(c, pod_cpus) { + sibling_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]]; + if (cpumask_first(sibling_cpus) == c) + nr_cores++; + } + + return nr_cores; +} + +/* + * llc_shard_size - number of cores in a given shard + * + * Cores are spread as evenly as possible. 
The first @nr_large_shards shards are + * "large shards" with (cores_per_shard + 1) cores; the rest are "default + * shards" with cores_per_shard cores. + */ +static int __init llc_shard_size(int shard_id, int cores_per_shard, int nr_large_shards) +{ + /* The first @nr_large_shards shards are large shards */ + if (shard_id < nr_large_shards) + return cores_per_shard + 1; + + /* The remaining shards are default shards */ + return cores_per_shard; +} + +/* + * llc_calc_shard_layout - compute the shard layout for an LLC pod + * @nr_cores: number of distinct cores in the LLC pod + * + * Chooses the number of shards that keeps average shard size closest to + * wq_cache_shard_size. Returns a struct describing the total number of shards, + * the base size of each, and how many are large shards. + */ +static struct llc_shard_layout __init llc_calc_shard_layout(int nr_cores) +{ + struct llc_shard_layout layout; + + /* Ensure at least one shard; pick the count closest to the target size */ + layout.nr_shards = max(1, DIV_ROUND_CLOSEST(nr_cores, wq_cache_shard_size)); + layout.cores_per_shard = nr_cores / layout.nr_shards; + layout.nr_large_shards = nr_cores % layout.nr_shards; + + return layout; +} + +/* + * llc_shard_is_full - check whether a shard has reached its core capacity + * @cores_in_shard: number of cores already assigned to this shard + * @shard_id: index of the shard being checked + * @layout: the shard layout computed by llc_calc_shard_layout() + * + * Returns true if @cores_in_shard equals the expected size for @shard_id. + */ +static bool __init llc_shard_is_full(int cores_in_shard, int shard_id, + const struct llc_shard_layout *layout) +{ + return cores_in_shard == llc_shard_size(shard_id, layout->cores_per_shard, + layout->nr_large_shards); +} + +/** + * llc_populate_cpu_shard_id - populate cpu_shard_id[] for each CPU in an LLC pod + * @pod_cpus: the cpumask of CPUs in the LLC pod + * @smt_pods: the SMT pod type, used to identify sibling groups + * @nr_cores: number of distinct cores in @pod_cpus (from llc_count_cores()) + * + * Walks @pod_cpus in order. At each SMT group leader, advances to the next + * shard once the current shard is full. Results are written to cpu_shard_id[]. + */ +static void __init llc_populate_cpu_shard_id(const struct cpumask *pod_cpus, + struct wq_pod_type *smt_pods, + int nr_cores) +{ + struct llc_shard_layout layout = llc_calc_shard_layout(nr_cores); + const struct cpumask *sibling_cpus; + /* Count the number of cores in the current shard_id */ + int cores_in_shard = 0; + unsigned int leader; + /* This is a cursor for the shards. Go from zero to nr_shards - 1*/ + int shard_id = 0; + int c; + + /* Iterate at every CPU for a given LLC pod, and assign it a shard */ + for_each_cpu(c, pod_cpus) { + sibling_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]]; + if (cpumask_first(sibling_cpus) == c) { + /* This is the CPU leader for the siblings */ + if (llc_shard_is_full(cores_in_shard, shard_id, &layout)) { + shard_id++; + cores_in_shard = 0; + } + cores_in_shard++; + cpu_shard_id[c] = shard_id; + } else { + /* + * The siblings' shard MUST be the same as the leader. + * never split threads in the same core. + */ + leader = cpumask_first(sibling_cpus); + + /* + * This check silences a Warray-bounds warning on UP + * configs where NR_CPUS=1 makes cpu_shard_id[] + * a single-element array, and the compiler can't + * prove the index is always 0. 
+ */ + if (WARN_ON_ONCE(leader >= nr_cpu_ids)) + continue; + cpu_shard_id[c] = cpu_shard_id[leader]; + } + } + + WARN_ON_ONCE(shard_id != (layout.nr_shards - 1)); +} + +/** + * precompute_cache_shard_ids - assign each CPU its shard index within its LLC + * + * Iterates over all LLC pods. For each pod, counts distinct cores then assigns + * shard indices to all CPUs in the pod. Must be called after WQ_AFFN_CACHE and + * WQ_AFFN_SMT have been initialized. + */ +static void __init precompute_cache_shard_ids(void) +{ + struct wq_pod_type *llc_pods = &wq_pod_types[WQ_AFFN_CACHE]; + struct wq_pod_type *smt_pods = &wq_pod_types[WQ_AFFN_SMT]; + const struct cpumask *cpus_sharing_llc; + int nr_cores; + int pod; + + if (!wq_cache_shard_size) { + pr_warn("workqueue: cache_shard_size must be > 0, setting to 1\n"); + wq_cache_shard_size = 1; + } + + for (pod = 0; pod < llc_pods->nr_pods; pod++) { + cpus_sharing_llc = llc_pods->pod_cpus[pod]; + + /* Number of cores in this given LLC */ + nr_cores = llc_count_cores(cpus_sharing_llc, smt_pods); + llc_populate_cpu_shard_id(cpus_sharing_llc, smt_pods, nr_cores); + } +} + +/* + * cpus_share_cache_shard - test whether two CPUs belong to the same cache shard + * + * Two CPUs share a cache shard if they are in the same LLC and have the same + * shard index. Used as the pod affinity callback for WQ_AFFN_CACHE_SHARD. + */ +static bool __init cpus_share_cache_shard(int cpu0, int cpu1) +{ + if (!cpus_share_cache(cpu0, cpu1)) + return false; + + return cpu_shard_id[cpu0] == cpu_shard_id[cpu1]; +} + /** * workqueue_init_topology - initialize CPU pods for unbound workqueues * @@ -8127,6 +8394,8 @@ void __init workqueue_init_topology(void) init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share); init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt); init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); + precompute_cache_shard_ids(); + init_pod_type(&wq_pod_types[WQ_AFFN_CACHE_SHARD], cpus_share_cache_shard); init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); wq_topo_initialized = true; |
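
devm_alloc_workqueue() registers destroy_workqueue() as a devres action via devm_add_action_or_reset(), so a driver that allocates its workqueue through it no longer has to tear the queue down in its error paths or remove callback. A sketch of how a probe routine might use the new helper; foo_probe() and the "%s_wq" name are hypothetical, only the devm_alloc_workqueue() signature comes from the patch:

static int foo_probe(struct platform_device *pdev)
{
        struct workqueue_struct *wq;

        /* destroyed automatically when pdev->dev is unbound */
        wq = devm_alloc_workqueue(&pdev->dev, "%s_wq", WQ_UNBOUND, 0,
                                  dev_name(&pdev->dev));
        if (!wq)
                return -ENOMEM;

        /* queue driver work items on wq as usual */
        return 0;
}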
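
system_dfl_long_wq is allocated above as an unbound workqueue ("events_dfl_long", WQ_UNBOUND, WQ_MAX_ACTIVE), i.e. a default-affinity home for work items that may run for a long time and have no reason to stay on the queueing CPU. A minimal usage sketch; my_long_fn, my_work and kick_background_job are placeholder names:

static void my_long_fn(struct work_struct *work)
{
        /* may sleep and run for a long time */
}
static DECLARE_WORK(my_work, my_long_fn);

static void kick_background_job(void)
{
        /* runs on an unbound worker, spread according to the default scope */
        queue_work(system_dfl_long_wq, &my_work);
}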
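
The shard layout computed by llc_calc_shard_layout() and llc_shard_size() above is plain integer arithmetic: DIV_ROUND_CLOSEST(nr_cores, wq_cache_shard_size) shards, with the remainder handed out one extra core at a time to the first shards. A minimal, stand-alone C sketch of that arithmetic; the 20-core LLC and the target size of 8 are illustrative values, not taken from the patch:

#include <stdio.h>

/* mirrors the kernel's DIV_ROUND_CLOSEST() for positive operands */
#define DIV_ROUND_CLOSEST(x, d)        (((x) + (d) / 2) / (d))

int main(void)
{
        int nr_cores = 20;      /* assumed number of cores in one LLC */
        int target = 8;         /* default wq_cache_shard_size */
        int nr_shards, cores_per_shard, nr_large_shards, id;

        nr_shards = DIV_ROUND_CLOSEST(nr_cores, target);
        if (nr_shards < 1)
                nr_shards = 1;
        cores_per_shard = nr_cores / nr_shards;         /* 6 */
        nr_large_shards = nr_cores % nr_shards;         /* 2 */

        /* llc_shard_size(): the first nr_large_shards shards get one extra core */
        for (id = 0; id < nr_shards; id++)
                printf("shard %d: %d cores\n", id,
                       cores_per_shard + (id < nr_large_shards ? 1 : 0));
        return 0;
}

For a 20-core LLC this yields shards of 7, 7 and 6 cores: every shard stays within one core of the requested size, and SMT siblings are never split across shards because llc_populate_cpu_shard_id() only advances the shard cursor at an SMT group leader.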
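
With this patch cache_shard becomes the default affinity scope (wq_affn_dfl), so it takes effect without any configuration. The shard size can only be chosen at boot through the new read-only module parameter, e.g. workqueue.cache_shard_size=4 on the kernel command line, while the scope itself can still be overridden with the existing workqueue.default_affinity_scope= parameter or a workqueue's affinity_scope sysfs file. For a single unbound workqueue the scope can also be set programmatically; a hedged sketch assuming the usual workqueue attrs helpers and that the WQ_AFFN_CACHE_SHARD enum value is added to include/linux/workqueue.h next to the "cache_shard" name above:

        struct workqueue_attrs *attrs;
        int ret = -ENOMEM;

        attrs = alloc_workqueue_attrs();
        if (attrs) {
                attrs->affn_scope = WQ_AFFN_CACHE_SHARD;
                /* wq must be an unbound (WQ_UNBOUND) workqueue */
                ret = apply_workqueue_attrs(wq, attrs);
                free_workqueue_attrs(attrs);
        }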
