Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/arch_topology.c                     |  11
-rw-r--r-- | drivers/base/base.h                              |   4
-rw-r--r-- | drivers/base/core.c                              | 368
-rw-r--r-- | drivers/base/dd.c                                |  68
-rw-r--r-- | drivers/base/devres.c                            |  25
-rw-r--r-- | drivers/base/driver.c                            |   4
-rw-r--r-- | drivers/base/firmware_loader/fallback_platform.c |   5
-rw-r--r-- | drivers/base/firmware_loader/main.c              |  12
-rw-r--r-- | drivers/base/memory.c                            |  15
-rw-r--r-- | drivers/base/platform.c                          |  28
-rw-r--r-- | drivers/base/power/domain.c                      | 194
-rw-r--r-- | drivers/base/power/domain_governor.c             |  12
-rw-r--r-- | drivers/base/power/sysfs.c                       |   9
-rw-r--r-- | drivers/base/power/trace.c                       |   4
-rw-r--r-- | drivers/base/property.c                          |   2
-rw-r--r-- | drivers/base/regmap/Kconfig                      |   2
-rw-r--r-- | drivers/base/regmap/regmap-debugfs.c             |  52
-rw-r--r-- | drivers/base/regmap/regmap-irq.c                 |  53
-rw-r--r-- | drivers/base/regmap/regmap.c                     | 141
-rw-r--r-- | drivers/base/swnode.c                            |   8
-rw-r--r-- | drivers/base/topology.c                          |   2
21 files changed, 703 insertions, 316 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 4d0a0038b476..75f72d684294 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -54,6 +54,17 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) per_cpu(cpu_scale, cpu) = capacity; } +DEFINE_PER_CPU(unsigned long, thermal_pressure); + +void topology_set_thermal_pressure(const struct cpumask *cpus, + unsigned long th_pressure) +{ + int cpu; + + for_each_cpu(cpu, cpus) + WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); +} + static ssize_t cpu_capacity_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/base/base.h b/drivers/base/base.h index 95c22c0f9036..91cfb8405abd 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -93,6 +93,7 @@ struct device_private { struct klist_node knode_class; struct list_head deferred_probe; struct device_driver *async_driver; + char *deferred_probe_reason; struct device *device; u8 dead:1; }; @@ -134,6 +135,8 @@ extern void device_release_driver_internal(struct device *dev, extern void driver_detach(struct device_driver *drv); extern int driver_probe_device(struct device_driver *drv, struct device *dev); extern void driver_deferred_probe_del(struct device *dev); +extern void device_set_deferred_probe_reason(const struct device *dev, + struct va_format *vaf); static inline int driver_match_device(struct device_driver *drv, struct device *dev) { @@ -153,7 +156,6 @@ extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); extern void device_block_probing(void); extern void device_unblock_probing(void); -extern void driver_deferred_probe_force_trigger(void); /* /sys/devices directory */ extern struct kset *devices_kset; diff --git a/drivers/base/core.c b/drivers/base/core.c index 67d39a90b45c..778c7a435cbd 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock); static LIST_HEAD(deferred_sync); static unsigned int defer_sync_state_count = 1; static unsigned int defer_fw_devlink_count; +static LIST_HEAD(deferred_fw_devlink); static DEFINE_MUTEX(defer_fw_devlink_lock); static bool fw_devlink_is_permissive(void); @@ -235,6 +236,210 @@ void device_pm_move_to_tail(struct device *dev) device_links_read_unlock(idx); } +#define to_devlink(dev) container_of((dev), struct device_link, link_dev) + +static ssize_t status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char *status; + + switch (to_devlink(dev)->status) { + case DL_STATE_NONE: + status = "not tracked"; break; + case DL_STATE_DORMANT: + status = "dormant"; break; + case DL_STATE_AVAILABLE: + status = "available"; break; + case DL_STATE_CONSUMER_PROBE: + status = "consumer probing"; break; + case DL_STATE_ACTIVE: + status = "active"; break; + case DL_STATE_SUPPLIER_UNBIND: + status = "supplier unbinding"; break; + default: + status = "unknown"; break; + } + return sprintf(buf, "%s\n", status); +} +static DEVICE_ATTR_RO(status); + +static ssize_t auto_remove_on_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + char *str; + + if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + str = "supplier unbind"; + else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) + str = "consumer unbind"; + else + str = "never"; + + return sprintf(buf, "%s\n", str); +} +static DEVICE_ATTR_RO(auto_remove_on); + +static ssize_t runtime_pm_show(struct device 
*dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + + return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME)); +} +static DEVICE_ATTR_RO(runtime_pm); + +static ssize_t sync_state_only_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + + return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); +} +static DEVICE_ATTR_RO(sync_state_only); + +static struct attribute *devlink_attrs[] = { + &dev_attr_status.attr, + &dev_attr_auto_remove_on.attr, + &dev_attr_runtime_pm.attr, + &dev_attr_sync_state_only.attr, + NULL, +}; +ATTRIBUTE_GROUPS(devlink); + +static void device_link_free(struct device_link *link) +{ + while (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + + put_device(link->consumer); + put_device(link->supplier); + kfree(link); +} + +#ifdef CONFIG_SRCU +static void __device_link_free_srcu(struct rcu_head *rhead) +{ + device_link_free(container_of(rhead, struct device_link, rcu_head)); +} + +static void devlink_dev_release(struct device *dev) +{ + struct device_link *link = to_devlink(dev); + + call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); +} +#else +static void devlink_dev_release(struct device *dev) +{ + device_link_free(to_devlink(dev)); +} +#endif + +static struct class devlink_class = { + .name = "devlink", + .owner = THIS_MODULE, + .dev_groups = devlink_groups, + .dev_release = devlink_dev_release, +}; + +static int devlink_add_symlinks(struct device *dev, + struct class_interface *class_intf) +{ + int ret; + size_t len; + struct device_link *link = to_devlink(dev); + struct device *sup = link->supplier; + struct device *con = link->consumer; + char *buf; + + len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len += strlen("supplier:") + 1; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier"); + if (ret) + goto out; + + ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer"); + if (ret) + goto err_con; + + snprintf(buf, len, "consumer:%s", dev_name(con)); + ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf); + if (ret) + goto err_con_dev; + + snprintf(buf, len, "supplier:%s", dev_name(sup)); + ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf); + if (ret) + goto err_sup_dev; + + goto out; + +err_sup_dev: + snprintf(buf, len, "consumer:%s", dev_name(con)); + sysfs_remove_link(&sup->kobj, buf); +err_con_dev: + sysfs_remove_link(&link->link_dev.kobj, "consumer"); +err_con: + sysfs_remove_link(&link->link_dev.kobj, "supplier"); +out: + kfree(buf); + return ret; +} + +static void devlink_remove_symlinks(struct device *dev, + struct class_interface *class_intf) +{ + struct device_link *link = to_devlink(dev); + size_t len; + struct device *sup = link->supplier; + struct device *con = link->consumer; + char *buf; + + sysfs_remove_link(&link->link_dev.kobj, "consumer"); + sysfs_remove_link(&link->link_dev.kobj, "supplier"); + + len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len += strlen("supplier:") + 1; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) { + WARN(1, "Unable to properly free device link symlinks!\n"); + return; + } + + snprintf(buf, len, "supplier:%s", dev_name(sup)); + sysfs_remove_link(&con->kobj, buf); + snprintf(buf, len, "consumer:%s", dev_name(con)); + sysfs_remove_link(&sup->kobj, buf); + kfree(buf); +} + +static struct class_interface 
devlink_class_intf = { + .class = &devlink_class, + .add_dev = devlink_add_symlinks, + .remove_dev = devlink_remove_symlinks, +}; + +static int __init devlink_class_init(void) +{ + int ret; + + ret = class_register(&devlink_class); + if (ret) + return ret; + + ret = class_interface_register(&devlink_class_intf); + if (ret) + class_unregister(&devlink_class); + + return ret; +} +postcore_initcall(devlink_class_init); + #define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \ DL_FLAG_AUTOREMOVE_SUPPLIER | \ DL_FLAG_AUTOPROBE_CONSUMER | \ @@ -407,13 +612,6 @@ struct device_link *device_link_add(struct device *consumer, refcount_set(&link->rpm_active, 1); - if (flags & DL_FLAG_PM_RUNTIME) { - if (flags & DL_FLAG_RPM_ACTIVE) - refcount_inc(&link->rpm_active); - - pm_runtime_new_link(consumer); - } - get_device(supplier); link->supplier = supplier; INIT_LIST_HEAD(&link->s_node); @@ -423,6 +621,25 @@ struct device_link *device_link_add(struct device *consumer, link->flags = flags; kref_init(&link->kref); + link->link_dev.class = &devlink_class; + device_set_pm_not_required(&link->link_dev); + dev_set_name(&link->link_dev, "%s--%s", + dev_name(supplier), dev_name(consumer)); + if (device_register(&link->link_dev)) { + put_device(consumer); + put_device(supplier); + kfree(link); + link = NULL; + goto out; + } + + if (flags & DL_FLAG_PM_RUNTIME) { + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + + pm_runtime_new_link(consumer); + } + /* Determine the initial link state. */ if (flags & DL_FLAG_STATELESS) link->status = DL_STATE_NONE; @@ -538,22 +755,7 @@ static void device_link_add_missing_supplier_links(void) mutex_unlock(&wfs_lock); } -static void device_link_free(struct device_link *link) -{ - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); - - put_device(link->consumer); - put_device(link->supplier); - kfree(link); -} - #ifdef CONFIG_SRCU -static void __device_link_free_srcu(struct rcu_head *rhead) -{ - device_link_free(container_of(rhead, struct device_link, rcu_head)); -} - static void __device_link_del(struct kref *kref) { struct device_link *link = container_of(kref, struct device_link, kref); @@ -566,7 +768,7 @@ static void __device_link_del(struct kref *kref) list_del_rcu(&link->s_node); list_del_rcu(&link->c_node); - call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); + device_unregister(&link->link_dev); } #else /* !CONFIG_SRCU */ static void __device_link_del(struct kref *kref) @@ -581,7 +783,7 @@ static void __device_link_del(struct kref *kref) list_del(&link->s_node); list_del(&link->c_node); - device_link_free(link); + device_unregister(&link->link_dev); } #endif /* !CONFIG_SRCU */ @@ -754,11 +956,11 @@ static void __device_links_queue_sync_state(struct device *dev, */ dev->state_synced = true; - if (WARN_ON(!list_empty(&dev->links.defer_sync))) + if (WARN_ON(!list_empty(&dev->links.defer_hook))) return; get_device(dev); - list_add_tail(&dev->links.defer_sync, list); + list_add_tail(&dev->links.defer_hook, list); } /** @@ -776,8 +978,8 @@ static void device_links_flush_sync_list(struct list_head *list, { struct device *dev, *tmp; - list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { - list_del_init(&dev->links.defer_sync); + list_for_each_entry_safe(dev, tmp, list, links.defer_hook) { + list_del_init(&dev->links.defer_hook); if (dev != dont_lock_dev) device_lock(dev); @@ -815,12 +1017,12 @@ void device_links_supplier_sync_state_resume(void) if (defer_sync_state_count) goto out; - 
list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { + list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) { /* * Delete from deferred_sync list before queuing it to - * sync_list because defer_sync is used for both lists. + * sync_list because defer_hook is used for both lists. */ - list_del_init(&dev->links.defer_sync); + list_del_init(&dev->links.defer_hook); __device_links_queue_sync_state(dev, &sync_list); } out: @@ -838,8 +1040,8 @@ late_initcall(sync_state_resume_initcall); static void __device_links_supplier_defer_sync(struct device *sup) { - if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) - list_add_tail(&sup->links.defer_sync, &deferred_sync); + if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup)) + list_add_tail(&sup->links.defer_hook, &deferred_sync); } static void device_link_drop_managed(struct device_link *link) @@ -849,6 +1051,22 @@ static void device_link_drop_managed(struct device_link *link) kref_put(&link->kref, __device_link_del); } +static ssize_t waiting_for_supplier_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + bool val; + + device_lock(dev); + mutex_lock(&wfs_lock); + val = !list_empty(&dev->links.needs_suppliers) + && dev->links.need_for_probe; + mutex_unlock(&wfs_lock); + device_unlock(dev); + return sprintf(buf, "%u\n", val); +} +static DEVICE_ATTR_RO(waiting_for_supplier); + /** * device_links_driver_bound - Update device links after probing its driver. * @dev: Device to update the links for. @@ -873,6 +1091,7 @@ void device_links_driver_bound(struct device *dev) mutex_lock(&wfs_lock); list_del_init(&dev->links.needs_suppliers); mutex_unlock(&wfs_lock); + device_remove_file(dev, &dev_attr_waiting_for_supplier); device_links_write_lock(); @@ -1052,7 +1271,7 @@ void device_links_driver_cleanup(struct device *dev) WRITE_ONCE(link->status, DL_STATE_DORMANT); } - list_del_init(&dev->links.defer_sync); + list_del_init(&dev->links.defer_hook); __device_links_no_driver(dev); device_links_write_unlock(); @@ -1159,6 +1378,9 @@ static void device_links_purge(struct device *dev) { struct device_link *link, *ln; + if (dev->class == &devlink_class) + return; + mutex_lock(&wfs_lock); list_del(&dev->links.needs_suppliers); mutex_unlock(&wfs_lock); @@ -1244,6 +1466,12 @@ static void fw_devlink_link_device(struct device *dev) fw_ret = -EAGAIN; } else { fw_ret = -ENODEV; + /* + * defer_hook is not used to add device to deferred_sync list + * until device is bound. Since deferred fw devlink also blocks + * probing, same list hook can be used for deferred_fw_devlink. + */ + list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink); } if (fw_ret == -ENODEV) @@ -1312,6 +1540,9 @@ void fw_devlink_pause(void) */ void fw_devlink_resume(void) { + struct device *dev, *tmp; + LIST_HEAD(probe_list); + mutex_lock(&defer_fw_devlink_lock); if (!defer_fw_devlink_count) { WARN(true, "Unmatched fw_devlink pause/resume!"); @@ -1323,9 +1554,19 @@ void fw_devlink_resume(void) goto out; device_link_add_missing_supplier_links(); - driver_deferred_probe_force_trigger(); + list_splice_tail_init(&deferred_fw_devlink, &probe_list); out: mutex_unlock(&defer_fw_devlink_lock); + + /* + * bus_probe_device() can cause new devices to get added and they'll + * try to grab defer_fw_devlink_lock. So, this needs to be done outside + * the defer_fw_devlink_lock. 
+ */ + list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) { + list_del_init(&dev->links.defer_hook); + bus_probe_device(dev); + } } /* Device links support end. */ @@ -1949,8 +2190,16 @@ static int device_add_attrs(struct device *dev) goto err_remove_dev_groups; } + if (fw_devlink_flags && !fw_devlink_is_permissive()) { + error = device_create_file(dev, &dev_attr_waiting_for_supplier); + if (error) + goto err_remove_dev_online; + } + return 0; + err_remove_dev_online: + device_remove_file(dev, &dev_attr_online); err_remove_dev_groups: device_remove_groups(dev, dev->groups); err_remove_type_groups: @@ -1968,6 +2217,7 @@ static void device_remove_attrs(struct device *dev) struct class *class = dev->class; const struct device_type *type = dev->type; + device_remove_file(dev, &dev_attr_waiting_for_supplier); device_remove_file(dev, &dev_attr_online); device_remove_groups(dev, dev->groups); @@ -2172,7 +2422,7 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->links.consumers); INIT_LIST_HEAD(&dev->links.suppliers); INIT_LIST_HEAD(&dev->links.needs_suppliers); - INIT_LIST_HEAD(&dev->links.defer_sync); + INIT_LIST_HEAD(&dev->links.defer_hook); dev->links.status = DL_DEV_NO_DRIVER; } EXPORT_SYMBOL_GPL(device_initialize); @@ -3953,6 +4203,52 @@ define_dev_printk_level(_dev_info, KERN_INFO); #endif +/** + * dev_err_probe - probe error check and log helper + * @dev: the pointer to the struct device + * @err: error value to test + * @fmt: printf-style format string + * @...: arguments as specified in the format string + * + * This helper implements common pattern present in probe functions for error + * checking: print debug or error message depending if the error value is + * -EPROBE_DEFER and propagate error upwards. + * In case of -EPROBE_DEFER it sets also defer probe reason, which can be + * checked later by reading devices_deferred debugfs attribute. + * It replaces code sequence: + * if (err != -EPROBE_DEFER) + * dev_err(dev, ...); + * else + * dev_dbg(dev, ...); + * return err; + * with + * return dev_err_probe(dev, err, ...); + * + * Returns @err. + * + */ +int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + if (err != -EPROBE_DEFER) { + dev_err(dev, "error %d: %pV", err, &vaf); + } else { + device_set_deferred_probe_reason(dev, &vaf); + dev_dbg(dev, "error %d: %pV", err, &vaf); + } + + va_end(args); + + return err; +} +EXPORT_SYMBOL_GPL(dev_err_probe); + static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) { return fwnode && !IS_ERR(fwnode->secondary); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9a1d940342ac..857b0a928e8d 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -27,6 +27,7 @@ #include <linux/async.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/devinfo.h> +#include <linux/slab.h> #include "base.h" #include "power/power.h" @@ -136,6 +137,8 @@ void driver_deferred_probe_del(struct device *dev) if (!list_empty(&dev->p->deferred_probe)) { dev_dbg(dev, "Removed from deferred list\n"); list_del_init(&dev->p->deferred_probe); + kfree(dev->p->deferred_probe_reason); + dev->p->deferred_probe_reason = NULL; } mutex_unlock(&deferred_probe_mutex); } @@ -164,11 +167,6 @@ static void driver_deferred_probe_trigger(void) if (!driver_deferred_probe_enable) return; - driver_deferred_probe_force_trigger(); -} - -void driver_deferred_probe_force_trigger(void) -{ /* * A successful probe means that all the devices in the pending list * should be triggered to be reprobed. Move all the deferred devices @@ -211,6 +209,23 @@ void device_unblock_probing(void) driver_deferred_probe_trigger(); } +/** + * device_set_deferred_probe_reason() - Set defer probe reason message for device + * @dev: the pointer to the struct device + * @vaf: the pointer to va_format structure with message + */ +void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf) +{ + const char *drv = dev_driver_string(dev); + + mutex_lock(&deferred_probe_mutex); + + kfree(dev->p->deferred_probe_reason); + dev->p->deferred_probe_reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf); + + mutex_unlock(&deferred_probe_mutex); +} + /* * deferred_devs_show() - Show the devices in the deferred probe pending list. */ @@ -221,7 +236,8 @@ static int deferred_devs_show(struct seq_file *s, void *data) mutex_lock(&deferred_probe_mutex); list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe) - seq_printf(s, "%s\n", dev_name(curr->device)); + seq_printf(s, "%s\t%s", dev_name(curr->device), + curr->device->p->deferred_probe_reason ?: "\n"); mutex_unlock(&deferred_probe_mutex); @@ -281,7 +297,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work) list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe) dev_info(private->device, "deferred probe pending\n"); - wake_up(&probe_timeout_waitqueue); + wake_up_all(&probe_timeout_waitqueue); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); @@ -430,10 +446,9 @@ static void driver_sysfs_remove(struct device *dev) * Allow manual attachment of a driver to a device. * Caller must have already set @dev->driver. * - * Note that this does not modify the bus reference count - * nor take the bus's rwsem. Please verify those are accounted - * for before calling this. (It is ok to call with no other effort - * from a driver's probe() method.) + * Note that this does not modify the bus reference count. + * Please verify that is accounted for before calling this. + * (It is ok to call with no other effort from a driver's probe() method.) 
* * This function must be called with the device lock held. */ @@ -463,6 +478,18 @@ static void driver_deferred_probe_add_trigger(struct device *dev, driver_deferred_probe_trigger(); } +static ssize_t state_synced_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + bool val; + + device_lock(dev); + val = dev->state_synced; + device_unlock(dev); + return sprintf(buf, "%u\n", val); +} +static DEVICE_ATTR_RO(state_synced); + static int really_probe(struct device *dev, struct device_driver *drv) { int ret = -EPROBE_DEFER; @@ -492,7 +519,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) drv->bus->name, __func__, drv->name, dev_name(dev)); if (!list_empty(&dev->devres_head)) { dev_crit(dev, "Resources present before probing\n"); - return -EBUSY; + ret = -EBUSY; + goto done; } re_probe: @@ -536,9 +564,16 @@ re_probe: goto dev_groups_failed; } + if (dev_has_sync_state(dev) && + device_create_file(dev, &dev_attr_state_synced)) { + dev_err(dev, "state_synced sysfs add failed\n"); + goto dev_sysfs_state_synced_failed; + } + if (test_remove) { test_remove = false; + device_remove_file(dev, &dev_attr_state_synced); device_remove_groups(dev, drv->dev_groups); if (dev->bus->remove) @@ -568,6 +603,8 @@ re_probe: drv->bus->name, __func__, dev_name(dev), drv->name); goto done; +dev_sysfs_state_synced_failed: + device_remove_groups(dev, drv->dev_groups); dev_groups_failed: if (dev->bus->remove) dev->bus->remove(dev); @@ -612,7 +649,7 @@ pinctrl_bind_failed: ret = 0; done: atomic_dec(&probe_count); - wake_up(&probe_waitqueue); + wake_up_all(&probe_waitqueue); return ret; } @@ -848,7 +885,9 @@ static int __device_attach(struct device *dev, bool allow_async) int ret = 0; device_lock(dev); - if (dev->driver) { + if (dev->p->dead) { + goto out_unlock; + } else if (dev->driver) { if (device_is_bound(dev)) { ret = 1; goto out_unlock; @@ -1105,6 +1144,7 @@ static void __device_release_driver(struct device *dev, struct device *parent) pm_runtime_put_sync(dev); + device_remove_file(dev, &dev_attr_state_synced); device_remove_groups(dev, drv->dev_groups); if (dev->bus && dev->bus->remove) diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 0bbb328bd17f..ed615d3b9cf1 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -89,15 +89,23 @@ static struct devres_group * node_to_group(struct devres_node *node) return NULL; } +static bool check_dr_size(size_t size, size_t *tot_size) +{ + /* We must catch any near-SIZE_MAX cases that could overflow. */ + if (unlikely(check_add_overflow(sizeof(struct devres), + size, tot_size))) + return false; + + return true; +} + static __always_inline struct devres * alloc_dr(dr_release_t release, size_t size, gfp_t gfp, int nid) { size_t tot_size; struct devres *dr; - /* We must catch any near-SIZE_MAX cases that could overflow. */ - if (unlikely(check_add_overflow(sizeof(struct devres), size, - &tot_size))) + if (!check_dr_size(size, &tot_size)) return NULL; dr = kmalloc_node_track_caller(tot_size, gfp, nid); @@ -807,10 +815,13 @@ static int devm_kmalloc_match(struct device *dev, void *res, void *data) * RETURNS: * Pointer to allocated memory on success, NULL on failure. 
*/ -void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) +void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) { struct devres *dr; + if (unlikely(!size)) + return ZERO_SIZE_PTR; + /* use raw alloc_dr for kmalloc caller tracing */ dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev)); if (unlikely(!dr)) @@ -942,10 +953,10 @@ void devm_kfree(struct device *dev, const void *p) int rc; /* - * Special case: pointer to a string in .rodata returned by - * devm_kstrdup_const(). + * Special cases: pointer to a string in .rodata returned by + * devm_kstrdup_const() or NULL/ZERO ptr. */ - if (unlikely(is_kernel_rodata((unsigned long)p))) + if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p))) return; rc = devres_destroy(dev, devm_kmalloc_release, diff --git a/drivers/base/driver.c b/drivers/base/driver.c index 57c68769e157..8c0d33e182fd 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -158,12 +158,12 @@ int driver_register(struct device_driver *drv) if ((drv->bus->probe && drv->probe) || (drv->bus->remove && drv->remove) || (drv->bus->shutdown && drv->shutdown)) - printk(KERN_WARNING "Driver '%s' needs updating - please use " + pr_warn("Driver '%s' needs updating - please use " "bus_type methods\n", drv->name); other = driver_find(drv->name, drv->bus); if (other) { - printk(KERN_ERR "Error: Driver '%s' is already registered, " + pr_err("Error: Driver '%s' is already registered, " "aborting...\n", drv->name); return -EBUSY; } diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c index cdd2c9a9f38a..685edb7dd05a 100644 --- a/drivers/base/firmware_loader/fallback_platform.c +++ b/drivers/base/firmware_loader/fallback_platform.c @@ -25,7 +25,10 @@ int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags) if (rc) return rc; /* rc == -ENOENT when the fw was not found */ - fw_priv->data = vmalloc(size); + if (fw_priv->data && size > fw_priv->allocated_size) + return -ENOMEM; + if (!fw_priv->data) + fw_priv->data = vmalloc(size); if (!fw_priv->data) return -ENOMEM; diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index ca871b13524e..9da0c9d5f538 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -838,12 +838,12 @@ EXPORT_SYMBOL(request_firmware); * @name: name of firmware file * @device: device for which firmware is being loaded * - * This function is similar in behaviour to request_firmware(), except - * it doesn't produce warning messages when the file is not found. - * The sysfs fallback mechanism is enabled if direct filesystem lookup fails, - * however, however failures to find the firmware file with it are still - * suppressed. It is therefore up to the driver to check for the return value - * of this call and to decide when to inform the users of errors. + * This function is similar in behaviour to request_firmware(), except it + * doesn't produce warning messages when the file is not found. The sysfs + * fallback mechanism is enabled if direct filesystem lookup fails. However, + * failures to find the firmware file with it are still suppressed. It is + * therefore up to the driver to check for the return value of this call and to + * decide when to inform the users of errors. 
**/ int firmware_request_nowarn(const struct firmware **firmware, const char *name, struct device *device) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 2b09b68b9f78..4db3c660de83 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -50,14 +50,14 @@ int memhp_online_type_from_str(const char *str) static int sections_per_block; -static inline unsigned long base_memory_block_id(unsigned long section_nr) +static inline unsigned long memory_block_id(unsigned long section_nr) { return section_nr / sections_per_block; } static inline unsigned long pfn_to_block_id(unsigned long pfn) { - return base_memory_block_id(pfn_to_section_nr(pfn)); + return memory_block_id(pfn_to_section_nr(pfn)); } static inline unsigned long phys_to_block_id(unsigned long phys) @@ -517,7 +517,7 @@ static struct memory_block *find_memory_block_by_id(unsigned long block_id) */ struct memory_block *find_memory_block(struct mem_section *section) { - unsigned long block_id = base_memory_block_id(__section_nr(section)); + unsigned long block_id = memory_block_id(__section_nr(section)); return find_memory_block_by_id(block_id); } @@ -570,8 +570,7 @@ int register_memory(struct memory_block *memory) return ret; } -static int init_memory_block(struct memory_block **memory, - unsigned long block_id, unsigned long state) +static int init_memory_block(unsigned long block_id, unsigned long state) { struct memory_block *mem; unsigned long start_pfn; @@ -594,14 +593,12 @@ static int init_memory_block(struct memory_block **memory, ret = register_memory(mem); - *memory = mem; return ret; } static int add_memory_block(unsigned long base_section_nr) { int section_count = 0; - struct memory_block *mem; unsigned long nr; for (nr = base_section_nr; nr < base_section_nr + sections_per_block; @@ -611,7 +608,7 @@ static int add_memory_block(unsigned long base_section_nr) if (section_count == 0) return 0; - return init_memory_block(&mem, base_memory_block_id(base_section_nr), + return init_memory_block(memory_block_id(base_section_nr), MEM_ONLINE); } @@ -647,7 +644,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size) return -EINVAL; for (block_id = start_block_id; block_id != end_block_id; block_id++) { - ret = init_memory_block(&mem, block_id, MEM_OFFLINE); + ret = init_memory_block(block_id, MEM_OFFLINE); if (ret) break; } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index c0d0a5490ac6..e5d8a0503b4f 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1019,7 +1019,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, if (len != -ENODEV) return len; - len = acpi_device_modalias(dev, buf, PAGE_SIZE -1); + len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); if (len != -ENODEV) return len; @@ -1076,13 +1076,37 @@ static ssize_t driver_override_show(struct device *dev, } static DEVICE_ATTR_RW(driver_override); +static ssize_t numa_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", dev_to_node(dev)); +} +static DEVICE_ATTR_RO(numa_node); + +static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, + int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + + if (a == &dev_attr_numa_node.attr && + dev_to_node(dev) == NUMA_NO_NODE) + return 0; + + return a->mode; +} static struct attribute *platform_dev_attrs[] = { &dev_attr_modalias.attr, + &dev_attr_numa_node.attr, &dev_attr_driver_override.attr, NULL, }; 
-ATTRIBUTE_GROUPS(platform_dev); + +static struct attribute_group platform_dev_group = { + .attrs = platform_dev_attrs, + .is_visible = platform_dev_attrs_visible, +}; +__ATTRIBUTE_GROUPS(platform_dev); static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) { diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0a01df608849..2cb5e04cf86c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, /* * Traverse all sub-domains within the domain. This can be * done without any additional locking as the link->performance_state - * field is protected by the master genpd->lock, which is already taken. + * field is protected by the parent genpd->lock, which is already taken. * * Also note that link->performance_state (subdomain's performance state - * requirement to master domain) is different from - * link->slave->performance_state (current performance state requirement + * requirement to parent domain) is different from + * link->child->performance_state (current performance state requirement * of the devices/sub-domains of the subdomain) and so can have a * different value. * * Note that we also take vote from powered-off sub-domains into account * as the same is done for devices right now. */ - list_for_each_entry(link, &genpd->master_links, master_node) { + list_for_each_entry(link, &genpd->parent_links, parent_node) { if (link->performance_state > state) state = link->performance_state; } @@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, static int _genpd_set_performance_state(struct generic_pm_domain *genpd, unsigned int state, int depth) { - struct generic_pm_domain *master; + struct generic_pm_domain *parent; struct gpd_link *link; - int master_state, ret; + int parent_state, ret; if (state == genpd->performance_state) return 0; - /* Propagate to masters of genpd */ - list_for_each_entry(link, &genpd->slave_links, slave_node) { - master = link->master; + /* Propagate to parents of genpd */ + list_for_each_entry(link, &genpd->child_links, child_node) { + parent = link->parent; - if (!master->set_performance_state) + if (!parent->set_performance_state) continue; - /* Find master's performance state */ + /* Find parent's performance state */ ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, - master->opp_table, + parent->opp_table, state); if (unlikely(ret < 0)) goto err; - master_state = ret; + parent_state = ret; - genpd_lock_nested(master, depth + 1); + genpd_lock_nested(parent, depth + 1); link->prev_performance_state = link->performance_state; - link->performance_state = master_state; - master_state = _genpd_reeval_performance_state(master, - master_state); - ret = _genpd_set_performance_state(master, master_state, depth + 1); + link->performance_state = parent_state; + parent_state = _genpd_reeval_performance_state(parent, + parent_state); + ret = _genpd_set_performance_state(parent, parent_state, depth + 1); if (ret) link->performance_state = link->prev_performance_state; - genpd_unlock(master); + genpd_unlock(parent); if (ret) goto err; @@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd, err: /* Encountered an error, lets rollback */ - list_for_each_entry_continue_reverse(link, &genpd->slave_links, - slave_node) { - master = link->master; + list_for_each_entry_continue_reverse(link, &genpd->child_links, + child_node) { + parent 
= link->parent; - if (!master->set_performance_state) + if (!parent->set_performance_state) continue; - genpd_lock_nested(master, depth + 1); + genpd_lock_nested(parent, depth + 1); - master_state = link->prev_performance_state; - link->performance_state = master_state; + parent_state = link->prev_performance_state; + link->performance_state = parent_state; - master_state = _genpd_reeval_performance_state(master, - master_state); - if (_genpd_set_performance_state(master, master_state, depth + 1)) { + parent_state = _genpd_reeval_performance_state(parent, + parent_state); + if (_genpd_set_performance_state(parent, parent_state, depth + 1)) { pr_err("%s: Failed to roll back to %d performance state\n", - master->name, master_state); + parent->name, parent_state); } - genpd_unlock(master); + genpd_unlock(parent); } return ret; @@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, /* * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call genpd_power_on() for the master yet after + * managed to call genpd_power_on() for the parent yet after * incrementing it. In that case genpd_power_on() will wait * for us to drop the lock, so we can call .power_off() and let * the genpd_power_on() restore power for us (this shouldn't @@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, genpd->status = GPD_STATE_POWER_OFF; genpd_update_accounting(genpd); - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_dec(link->master); - genpd_lock_nested(link->master, depth + 1); - genpd_power_off(link->master, false, depth + 1); - genpd_unlock(link->master); + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_dec(link->parent); + genpd_lock_nested(link->parent, depth + 1); + genpd_power_off(link->parent, false, depth + 1); + genpd_unlock(link->parent); } return 0; } /** - * genpd_power_on - Restore power to a given PM domain and its masters. + * genpd_power_on - Restore power to a given PM domain and its parents. * @genpd: PM domain to power up. * @depth: nesting count for lockdep. * - * Restore power to @genpd and all of its masters so that it is possible to + * Restore power to @genpd and all of its parents so that it is possible to * resume a device belonging to it. */ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) @@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) /* * The list is guaranteed not to change while the loop below is being - * executed, unless one of the masters' .power_on() callbacks fiddles + * executed, unless one of the parents' .power_on() callbacks fiddles * with it. 
*/ - list_for_each_entry(link, &genpd->slave_links, slave_node) { - struct generic_pm_domain *master = link->master; + list_for_each_entry(link, &genpd->child_links, child_node) { + struct generic_pm_domain *parent = link->parent; - genpd_sd_counter_inc(master); + genpd_sd_counter_inc(parent); - genpd_lock_nested(master, depth + 1); - ret = genpd_power_on(master, depth + 1); - genpd_unlock(master); + genpd_lock_nested(parent, depth + 1); + ret = genpd_power_on(parent, depth + 1); + genpd_unlock(parent); if (ret) { - genpd_sd_counter_dec(master); + genpd_sd_counter_dec(parent); goto err; } } @@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) err: list_for_each_entry_continue_reverse(link, - &genpd->slave_links, - slave_node) { - genpd_sd_counter_dec(link->master); - genpd_lock_nested(link->master, depth + 1); - genpd_power_off(link->master, false, depth + 1); - genpd_unlock(link->master); + &genpd->child_links, + child_node) { + genpd_sd_counter_dec(link->parent); + genpd_lock_nested(link->parent, depth + 1); + genpd_power_off(link->parent, false, depth + 1); + genpd_unlock(link->parent); } return ret; @@ -932,13 +932,13 @@ late_initcall(genpd_power_off_unused); #ifdef CONFIG_PM_SLEEP /** - * genpd_sync_power_off - Synchronously power off a PM domain and its masters. + * genpd_sync_power_off - Synchronously power off a PM domain and its parents. * @genpd: PM domain to power off, if possible. * @use_lock: use the lock. * @depth: nesting count for lockdep. * * Check if the given PM domain can be powered off (during system suspend or - * hibernation) and do that if so. Also, in that case propagate to its masters. + * hibernation) and do that if so. Also, in that case propagate to its parents. * * This function is only called in "noirq" and "syscore" stages of system power * transitions. The "noirq" callbacks may be executed asynchronously, thus in @@ -963,21 +963,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, genpd->status = GPD_STATE_POWER_OFF; - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_dec(link->master); + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_dec(link->parent); if (use_lock) - genpd_lock_nested(link->master, depth + 1); + genpd_lock_nested(link->parent, depth + 1); - genpd_sync_power_off(link->master, use_lock, depth + 1); + genpd_sync_power_off(link->parent, use_lock, depth + 1); if (use_lock) - genpd_unlock(link->master); + genpd_unlock(link->parent); } } /** - * genpd_sync_power_on - Synchronously power on a PM domain and its masters. + * genpd_sync_power_on - Synchronously power on a PM domain and its parents. * @genpd: PM domain to power on. * @use_lock: use the lock. * @depth: nesting count for lockdep. 
@@ -994,16 +994,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, if (genpd_status_on(genpd)) return; - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_inc(link->master); + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_inc(link->parent); if (use_lock) - genpd_lock_nested(link->master, depth + 1); + genpd_lock_nested(link->parent, depth + 1); - genpd_sync_power_on(link->master, use_lock, depth + 1); + genpd_sync_power_on(link->parent, use_lock, depth + 1); if (use_lock) - genpd_unlock(link->master); + genpd_unlock(link->parent); } _genpd_power_on(genpd, false); @@ -1443,12 +1443,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd, if (!genpd_is_cpu_domain(genpd)) return; - list_for_each_entry(link, &genpd->slave_links, slave_node) { - struct generic_pm_domain *master = link->master; + list_for_each_entry(link, &genpd->child_links, child_node) { + struct generic_pm_domain *parent = link->parent; - genpd_lock_nested(master, depth + 1); - genpd_update_cpumask(master, cpu, set, depth + 1); - genpd_unlock(master); + genpd_lock_nested(parent, depth + 1); + genpd_update_cpumask(parent, cpu, set, depth + 1); + genpd_unlock(parent); } if (set) @@ -1636,17 +1636,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, goto out; } - list_for_each_entry(itr, &genpd->master_links, master_node) { - if (itr->slave == subdomain && itr->master == genpd) { + list_for_each_entry(itr, &genpd->parent_links, parent_node) { + if (itr->child == subdomain && itr->parent == genpd) { ret = -EINVAL; goto out; } } - link->master = genpd; - list_add_tail(&link->master_node, &genpd->master_links); - link->slave = subdomain; - list_add_tail(&link->slave_node, &subdomain->slave_links); + link->parent = genpd; + list_add_tail(&link->parent_node, &genpd->parent_links); + link->child = subdomain; + list_add_tail(&link->child_node, &subdomain->child_links); if (genpd_status_on(subdomain)) genpd_sd_counter_inc(genpd); @@ -1660,7 +1660,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, /** * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. - * @genpd: Master PM domain to add the subdomain to. + * @genpd: Leader PM domain to add the subdomain to. * @subdomain: Subdomain to be added. */ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, @@ -1678,7 +1678,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); /** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. - * @genpd: Master PM domain to remove the subdomain from. + * @genpd: Leader PM domain to remove the subdomain from. * @subdomain: Subdomain to be removed. 
*/ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, @@ -1693,19 +1693,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, genpd_lock(subdomain); genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); - if (!list_empty(&subdomain->master_links) || subdomain->device_count) { + if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { pr_warn("%s: unable to remove subdomain %s\n", genpd->name, subdomain->name); ret = -EBUSY; goto out; } - list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { - if (link->slave != subdomain) + list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { + if (link->child != subdomain) continue; - list_del(&link->master_node); - list_del(&link->slave_node); + list_del(&link->parent_node); + list_del(&link->child_node); kfree(link); if (genpd_status_on(subdomain)) genpd_sd_counter_dec(genpd); @@ -1770,8 +1770,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd)) return -EINVAL; - INIT_LIST_HEAD(&genpd->master_links); - INIT_LIST_HEAD(&genpd->slave_links); + INIT_LIST_HEAD(&genpd->parent_links); + INIT_LIST_HEAD(&genpd->child_links); INIT_LIST_HEAD(&genpd->dev_list); genpd_lock_init(genpd); genpd->gov = gov; @@ -1848,15 +1848,15 @@ static int genpd_remove(struct generic_pm_domain *genpd) return -EBUSY; } - if (!list_empty(&genpd->master_links) || genpd->device_count) { + if (!list_empty(&genpd->parent_links) || genpd->device_count) { genpd_unlock(genpd); pr_err("%s: unable to remove %s\n", __func__, genpd->name); return -EBUSY; } - list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) { - list_del(&link->master_node); - list_del(&link->slave_node); + list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { + list_del(&link->parent_node); + list_del(&link->child_node); kfree(link); } @@ -2827,12 +2827,12 @@ static int genpd_summary_one(struct seq_file *s, /* * Modifications on the list require holding locks on both - * master and slave, so we are safe. + * parent and child, so we are safe. * Also genpd->name is immutable. */ - list_for_each_entry(link, &genpd->master_links, master_node) { - seq_printf(s, "%s", link->slave->name); - if (!list_is_last(&link->master_node, &genpd->master_links)) + list_for_each_entry(link, &genpd->parent_links, parent_node) { + seq_printf(s, "%s", link->child->name); + if (!list_is_last(&link->parent_node, &genpd->parent_links)) seq_puts(s, ", "); } @@ -2860,7 +2860,7 @@ static int summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, "domain status slaves\n"); + seq_puts(s, "domain status children\n"); seq_puts(s, " /device runtime status\n"); seq_puts(s, "----------------------------------------------------------------------\n"); @@ -2915,8 +2915,8 @@ static int sub_domains_show(struct seq_file *s, void *data) if (ret) return -ERESTARTSYS; - list_for_each_entry(link, &genpd->master_links, master_node) - seq_printf(s, "%s\n", link->slave->name); + list_for_each_entry(link, &genpd->parent_links, parent_node) + seq_printf(s, "%s\n", link->child->name); genpd_unlock(genpd); return ret; diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index daa8c7689f7e..490ed7deb99a 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, * * All subdomains have been powered off already at this point. 
*/ - list_for_each_entry(link, &genpd->master_links, master_node) { - struct generic_pm_domain *sd = link->slave; + list_for_each_entry(link, &genpd->parent_links, parent_node) { + struct generic_pm_domain *sd = link->child; s64 sd_max_off_ns = sd->max_off_time_ns; if (sd_max_off_ns < 0) @@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) } /* - * We have to invalidate the cached results for the masters, so + * We have to invalidate the cached results for the parents, so * use the observation that default_power_down_ok() is not - * going to be called for any master until this instance + * going to be called for any parent until this instance * returns. */ - list_for_each_entry(link, &genpd->slave_links, slave_node) - link->master->max_off_time_changed = true; + list_for_each_entry(link, &genpd->child_links, child_node) + link->parent->max_off_time_changed = true; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = false; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 24d25cf8ab14..c7b24812523c 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* sysfs entries for device PM */ #include <linux/device.h> +#include <linux/kobject.h> #include <linux/string.h> #include <linux/export.h> #include <linux/pm_qos.h> @@ -739,12 +740,18 @@ int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) int wakeup_sysfs_add(struct device *dev) { - return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); + int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); + + if (!ret) + kobject_uevent(&dev->kobj, KOBJ_CHANGE); + + return ret; } void wakeup_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + kobject_uevent(&dev->kobj, KOBJ_CHANGE); } int pm_qos_sysfs_add_resume_latency(struct device *dev) diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 977d27bd1a22..a97f33d0c59f 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -265,14 +265,14 @@ static struct notifier_block pm_trace_nb = { .notifier_call = pm_trace_notify, }; -static int early_resume_init(void) +static int __init early_resume_init(void) { hash_value_early_read = read_magic_time(); register_pm_notifier(&pm_trace_nb); return 0; } -static int late_resume_init(void) +static int __init late_resume_init(void) { unsigned int val = hash_value_early_read; unsigned int user, file, dev; diff --git a/drivers/base/property.c b/drivers/base/property.c index 1e6d75e65938..d58aa98fe964 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, return next; /* When no more children in primary, continue with secondary */ - if (!IS_ERR_OR_NULL(fwnode->secondary)) + if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) next = fwnode_get_next_child_node(fwnode->secondary, child); return next; diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 0fd6f97ee523..1d1d26b0d279 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -4,7 +4,7 @@ # subsystems should select the appropriate symbols. 
config REGMAP - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C) + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C) select IRQ_DOMAIN if REGMAP_IRQ bool diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 089e5dc7144a..f58baff2be0a 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_only); - ssize_t result; - bool was_enabled, require_sync = false; + bool new_val, require_sync = false; int err; - map->lock(map->lock_arg); + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; - was_enabled = map->cache_only; + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) { - map->unlock(map->lock_arg); - return result; - } + map->lock(map->lock_arg); - if (map->cache_only && !was_enabled) { + if (new_val && !map->cache_only) { dev_warn(map->dev, "debugfs cache_only=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_only && was_enabled) { + } else if (!new_val && map->cache_only) { dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); require_sync = true; } + map->cache_only = new_val; map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); if (require_sync) { err = regcache_sync(map); @@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file, dev_err(map->dev, "Failed to sync cache %d\n", err); } - return result; + return count; } static const struct file_operations regmap_cache_only_fops = { @@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_bypass); - ssize_t result; - bool was_enabled; + bool new_val; + int err; - map->lock(map->lock_arg); + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; - was_enabled = map->cache_bypass; + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) - goto out; + map->lock(map->lock_arg); - if (map->cache_bypass && !was_enabled) { + if (new_val && !map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_bypass && was_enabled) { + } else if (!new_val && map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); } + map->cache_bypass = new_val; -out: map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); - return result; + return count; } static const struct file_operations regmap_cache_bypass_fops = { diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 4340e1d268b6..369a57e6f89d 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -541,9 +541,9 @@ static const struct irq_domain_ops regmap_domain_ops = { }; /** - * regmap_add_irq_chip_np() - Use standard regmap IRQ controller 
handling + * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling * - * @np: The device_node where the IRQ domain should be added to. + * @fwnode: The firmware node where the IRQ domain should be added to. * @map: The regmap for the device. * @irq: The IRQ the device uses to signal interrupts. * @irq_flags: The IRQF_ flags to use for the primary interrupt. @@ -557,10 +557,11 @@ static const struct irq_domain_ops regmap_domain_ops = { * register cache. The chip driver is responsible for restoring the * register values used by the IRQ controller over suspend and resume. */ -int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq, - int irq_flags, int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data) +int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data *d; int i; @@ -771,10 +772,12 @@ int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq, } if (irq_base) - d->domain = irq_domain_add_legacy(np, chip->num_irqs, irq_base, + d->domain = irq_domain_add_legacy(to_of_node(fwnode), + chip->num_irqs, irq_base, 0, ®map_domain_ops, d); else - d->domain = irq_domain_add_linear(np, chip->num_irqs, + d->domain = irq_domain_add_linear(to_of_node(fwnode), + chip->num_irqs, ®map_domain_ops, d); if (!d->domain) { dev_err(map->dev, "Failed to create IRQ domain\n"); @@ -808,7 +811,7 @@ err_alloc: kfree(d); return ret; } -EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np); +EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode); /** * regmap_add_irq_chip() - Use standard regmap IRQ controller handling @@ -822,15 +825,15 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np); * * Returns 0 on success or an errno on failure. * - * This is the same as regmap_add_irq_chip_np, except that the device + * This is the same as regmap_add_irq_chip_fwnode, except that the firmware * node of the regmap is used. */ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data) { - return regmap_add_irq_chip_np(map->dev->of_node, map, irq, irq_flags, - irq_base, chip, data); + return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq, + irq_flags, irq_base, chip, data); } EXPORT_SYMBOL_GPL(regmap_add_irq_chip); @@ -899,10 +902,10 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data) } /** - * devm_regmap_add_irq_chip_np() - Resource manager regmap_add_irq_chip_np() + * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode() * * @dev: The device pointer on which irq_chip belongs to. - * @np: The device_node where the IRQ domain should be added to. + * @fwnode: The firmware node where the IRQ domain should be added to. * @map: The regmap for the device. * @irq: The IRQ the device uses to signal interrupts * @irq_flags: The IRQF_ flags to use for the primary interrupt. @@ -915,11 +918,12 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data) * The ®map_irq_chip_data will be automatically released when the device is * unbound. 
*/ -int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, - struct regmap *map, int irq, int irq_flags, - int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data) +int devm_regmap_add_irq_chip_fwnode(struct device *dev, + struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data **ptr, *d; int ret; @@ -929,8 +933,8 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, if (!ptr) return -ENOMEM; - ret = regmap_add_irq_chip_np(np, map, irq, irq_flags, irq_base, - chip, &d); + ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base, + chip, &d); if (ret < 0) { devres_free(ptr); return ret; @@ -941,7 +945,7 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, *data = d; return 0; } -EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_np); +EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode); /** * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip() @@ -964,8 +968,9 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data) { - return devm_regmap_add_irq_chip_np(dev, map->dev->of_node, map, irq, - irq_flags, irq_base, chip, data); + return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map, + irq, irq_flags, irq_base, chip, + data); } EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index c472f624382d..e93700af7e6e 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -11,12 +11,13 @@ #include <linux/export.h> #include <linux/mutex.h> #include <linux/err.h> -#include <linux/of.h> +#include <linux/property.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/log2.h> #include <linux/hwspinlock.h> +#include <asm/unaligned.h> #define CREATE_TRACE_POINTS #include "trace.h" @@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) { - __be16 *b = buf; - - b[0] = cpu_to_be16(val << shift); + put_unaligned_be16(val << shift, buf); } static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) { - __le16 *b = buf; - - b[0] = cpu_to_le16(val << shift); + put_unaligned_le16(val << shift, buf); } static void regmap_format_16_native(void *buf, unsigned int val, unsigned int shift) { - *(u16 *)buf = val << shift; + u16 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) @@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) { - __be32 *b = buf; - - b[0] = cpu_to_be32(val << shift); + put_unaligned_be32(val << shift, buf); } static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) { - __le32 *b = buf; - - b[0] = cpu_to_le32(val << shift); + put_unaligned_le32(val << shift, buf); } static void regmap_format_32_native(void *buf, unsigned int val, unsigned int shift) { - *(u32 *)buf = val << shift; + u32 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } #ifdef CONFIG_64BIT static void regmap_format_64_be(void *buf, unsigned int val, unsigned int 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c472f624382d..e93700af7e6e 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -11,12 +11,13 @@
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/err.h>
-#include <linux/of.h>
+#include <linux/property.h>
 #include <linux/rbtree.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 {
-        __be16 *b = buf;
-
-        b[0] = cpu_to_be16(val << shift);
+        put_unaligned_be16(val << shift, buf);
 }
 
 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
 {
-        __le16 *b = buf;
-
-        b[0] = cpu_to_le16(val << shift);
+        put_unaligned_le16(val << shift, buf);
 }
 
 static void regmap_format_16_native(void *buf, unsigned int val,
                                     unsigned int shift)
 {
-        *(u16 *)buf = val << shift;
+        u16 v = val << shift;
+
+        memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 {
-        __be32 *b = buf;
-
-        b[0] = cpu_to_be32(val << shift);
+        put_unaligned_be32(val << shift, buf);
 }
 
 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
 {
-        __le32 *b = buf;
-
-        b[0] = cpu_to_le32(val << shift);
+        put_unaligned_le32(val << shift, buf);
 }
 
 static void regmap_format_32_native(void *buf, unsigned int val,
                                     unsigned int shift)
 {
-        *(u32 *)buf = val << shift;
+        u32 v = val << shift;
+
+        memcpy(buf, &v, sizeof(v));
 }
 
 #ifdef CONFIG_64BIT
 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int
                                 shift)
 {
-        __be64 *b = buf;
-
-        b[0] = cpu_to_be64((u64)val << shift);
+        put_unaligned_be64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int
                                 shift)
 {
-        __le64 *b = buf;
-
-        b[0] = cpu_to_le64((u64)val << shift);
+        put_unaligned_le64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_native(void *buf, unsigned int val, unsigned int
                                     shift)
 {
-        *(u64 *)buf = (u64)val << shift;
+        u64 v = (u64) val << shift;
+
+        memcpy(buf, &v, sizeof(v));
 }
 #endif
@@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf)
 
 static unsigned int regmap_parse_16_be(const void *buf)
 {
-        const __be16 *b = buf;
-
-        return be16_to_cpu(b[0]);
+        return get_unaligned_be16(buf);
 }
 
 static unsigned int regmap_parse_16_le(const void *buf)
 {
-        const __le16 *b = buf;
-
-        return le16_to_cpu(b[0]);
+        return get_unaligned_le16(buf);
 }
 
 static void regmap_parse_16_be_inplace(void *buf)
 {
-        __be16 *b = buf;
+        u16 v = get_unaligned_be16(buf);
 
-        b[0] = be16_to_cpu(b[0]);
+        memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_16_le_inplace(void *buf)
 {
-        __le16 *b = buf;
+        u16 v = get_unaligned_le16(buf);
 
-        b[0] = le16_to_cpu(b[0]);
+        memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_16_native(const void *buf)
 {
-        return *(u16 *)buf;
+        u16 v;
+
+        memcpy(&v, buf, sizeof(v));
+        return v;
 }
 
 static unsigned int regmap_parse_24(const void *buf)
@@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf)
 
 static unsigned int regmap_parse_32_be(const void *buf)
 {
-        const __be32 *b = buf;
-
-        return be32_to_cpu(b[0]);
+        return get_unaligned_be32(buf);
 }
 
 static unsigned int regmap_parse_32_le(const void *buf)
 {
-        const __le32 *b = buf;
-
-        return le32_to_cpu(b[0]);
+        return get_unaligned_le32(buf);
 }
 
 static void regmap_parse_32_be_inplace(void *buf)
 {
-        __be32 *b = buf;
+        u32 v = get_unaligned_be32(buf);
 
-        b[0] = be32_to_cpu(b[0]);
+        memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_32_le_inplace(void *buf)
 {
-        __le32 *b = buf;
+        u32 v = get_unaligned_le32(buf);
 
-        b[0] = le32_to_cpu(b[0]);
+        memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_32_native(const void *buf)
 {
-        return *(u32 *)buf;
+        u32 v;
+
+        memcpy(&v, buf, sizeof(v));
+        return v;
 }
 
 #ifdef CONFIG_64BIT
 static unsigned int regmap_parse_64_be(const void *buf)
 {
-        const __be64 *b = buf;
-
-        return be64_to_cpu(b[0]);
+        return get_unaligned_be64(buf);
 }
 
 static unsigned int regmap_parse_64_le(const void *buf)
 {
-        const __le64 *b = buf;
-
-        return le64_to_cpu(b[0]);
+        return get_unaligned_le64(buf);
 }
 
 static void regmap_parse_64_be_inplace(void *buf)
 {
-        __be64 *b = buf;
+        u64 v = get_unaligned_be64(buf);
 
-        b[0] = be64_to_cpu(b[0]);
+        memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_64_le_inplace(void *buf)
 {
-        __le64 *b = buf;
+        u64 v = get_unaligned_le64(buf);
 
-        b[0] = le64_to_cpu(b[0]);
+        memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_64_native(const void *buf)
 {
-        return *(u64 *)buf;
+        u64 v;
+
+        memcpy(&v, buf, sizeof(v));
+        return v;
 }
 #endif
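The regmap_format_*()/regmap_parse_*() hunks above replace type-punning casts with put_unaligned_*()/get_unaligned_*() (and memcpy() for the native-endian variants), because the value buffers these helpers touch are not guaranteed to be naturally aligned. A stand-alone illustration of the same idea, using a made-up buffer layout rather than anything from regmap itself:

#include <asm/unaligned.h>

/*
 * Hypothetical layout: one 8-bit register address followed immediately by a
 * 16-bit big-endian value, so the value starts at an odd offset.  A load
 * through a __be16 * here would be an unaligned access on strict-alignment
 * architectures; get_unaligned_be16() is safe at any offset.
 */
static unsigned int parse_odd_offset_val(const u8 *xfer_buf)
{
        return get_unaligned_be16(xfer_buf + 1);
}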
@@ -639,7 +631,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
                                          const struct regmap_bus *bus,
                                          const struct regmap_config *config)
 {
-        struct device_node *np;
+        struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
         enum regmap_endian endian;
 
         /* Retrieve the endianness specification from the regmap config */
@@ -649,22 +641,17 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
         if (endian != REGMAP_ENDIAN_DEFAULT)
                 return endian;
 
-        /* If the dev and dev->of_node exist try to get endianness from DT */
-        if (dev && dev->of_node) {
-                np = dev->of_node;
-
-                /* Parse the device's DT node for an endianness specification */
-                if (of_property_read_bool(np, "big-endian"))
-                        endian = REGMAP_ENDIAN_BIG;
-                else if (of_property_read_bool(np, "little-endian"))
-                        endian = REGMAP_ENDIAN_LITTLE;
-                else if (of_property_read_bool(np, "native-endian"))
-                        endian = REGMAP_ENDIAN_NATIVE;
-
-                /* If the endianness was specified in DT, use that */
-                if (endian != REGMAP_ENDIAN_DEFAULT)
-                        return endian;
-        }
+        /* If the firmware node exist try to get endianness from it */
+        if (fwnode_property_read_bool(fwnode, "big-endian"))
+                endian = REGMAP_ENDIAN_BIG;
+        else if (fwnode_property_read_bool(fwnode, "little-endian"))
+                endian = REGMAP_ENDIAN_LITTLE;
+        else if (fwnode_property_read_bool(fwnode, "native-endian"))
+                endian = REGMAP_ENDIAN_NATIVE;
+
+        /* If the endianness was specified in fwnode, use that */
+        if (endian != REGMAP_ENDIAN_DEFAULT)
+                return endian;
 
         /* Retrieve the endianness specification from the bus config */
         if (bus && bus->val_format_endian_default)
@@ -1357,6 +1344,7 @@ void regmap_exit(struct regmap *map)
         if (map->hwlock)
                 hwspin_lock_free(map->hwlock);
         kfree_const(map->name);
+        kfree(map->patch);
         kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1371,7 +1359,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
 
         /* If the user didn't specify a name match any */
         if (data)
-                return (*r)->name == data;
+                return !strcmp((*r)->name, data);
         else
                 return 1;
 }
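The switch to strcmp() above matters because the regmap core may hold its own copy of the name rather than the caller's string (note the kfree_const(map->name) in regmap_exit()), so matching by pointer is not reliable. A consumer sketch with hypothetical device and register details, not taken from any real driver:

static int consumer_attach(struct device *mfd_dev)
{
        struct regmap *map;

        /* Look up a named regmap registered by the parent MFD driver. */
        map = dev_get_regmap(mfd_dev, "codec");
        if (!map)
                return -ENODEV;

        /* Set an invented enable bit in an invented register. */
        return regmap_update_bits(map, 0x10, BIT(0), BIT(0));
}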
@@ -2031,7 +2019,7 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
  */
-int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
                                    unsigned int mask, unsigned int val,
                                    bool *change, bool async, bool force)
 {
@@ -2944,8 +2932,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
  * @reg: Register to read from
  * @bits: Bits to test
  *
- * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
  */
 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
 {
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index e5eb27375416..010828fc785b 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -761,17 +761,13 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
  */
 void software_node_unregister_node_group(const struct software_node **node_group)
 {
-        struct swnode *swnode;
         unsigned int i;
 
         if (!node_group)
                 return;
 
-        for (i = 0; node_group[i]; i++) {
-                swnode = software_node_to_swnode(node_group[i]);
-                if (swnode)
-                        fwnode_remove_software_node(&swnode->fwnode);
-        }
+        for (i = 0; node_group[i]; i++)
+                software_node_unregister(node_group[i]);
 }
 EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
 
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 4e033d4cc0dc..ad8d33c6077b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -133,7 +133,7 @@ static int topology_remove_dev(unsigned int cpu)
         return 0;
 }
 
-static int topology_sysfs_init(void)
+static int __init topology_sysfs_init(void)
 {
         return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
                                  "base/topology:prepare", topology_add_dev,
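Returning to the regmap_test_bits() kerneldoc correction in regmap.c above: callers are expected to distinguish all three return cases rather than treating the result as a boolean. A usage sketch with invented register and bit values:

static int foo_wait_ready_once(struct regmap *map)
{
        int ret;

        ret = regmap_test_bits(map, 0x04, BIT(3) | BIT(1));
        if (ret < 0)            /* the underlying regmap_read() failed */
                return ret;
        if (ret == 0)           /* at least one tested bit is still clear */
                return -EBUSY;

        return 0;               /* all tested bits are set */
}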