Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                      |   3
-rw-r--r--  drivers/base/Makefile                     |   1
-rw-r--r--  drivers/base/auxiliary.c                  | 279
-rw-r--r--  drivers/base/base.h                       |   1
-rw-r--r--  drivers/base/class.c                      |   2
-rw-r--r--  drivers/base/core.c                       | 568
-rw-r--r--  drivers/base/dd.c                         |   9
-rw-r--r--  drivers/base/devres.c                     |   2
-rw-r--r--  drivers/base/firmware_loader/fallback.c   |   2
-rw-r--r--  drivers/base/node.c                       |   2
-rw-r--r--  drivers/base/platform.c                   | 474
-rw-r--r--  drivers/base/power/domain.c               | 130
-rw-r--r--  drivers/base/power/main.c                 |   8
-rw-r--r--  drivers/base/property.c                   |  52
-rw-r--r--  drivers/base/regmap/Kconfig               |   6
-rw-r--r--  drivers/base/regmap/Makefile              |   1
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c      |   9
-rw-r--r--  drivers/base/regmap/regmap-irq.c          |  11
-rw-r--r--  drivers/base/regmap/regmap-mmio.c         |  90
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c      | 101
-rw-r--r--  drivers/base/regmap/regmap-sdw.c          |   2
-rw-r--r--  drivers/base/regmap/regmap.c              |  11
-rw-r--r--  drivers/base/regmap/trace.h               |   1
-rw-r--r--  drivers/base/soc.c                        |   2
-rw-r--r--  drivers/base/swnode.c                     |   2
25 files changed, 1263 insertions, 506 deletions
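
The largest addition below is the new auxiliary bus (drivers/base/auxiliary.c). As orientation before the raw diff, here is a minimal, hypothetical sketch of how a parent driver and a sub-driver would use the two-step registration API that the new file implements. The foo_* names are invented for illustration, and auxiliary_device_add(), auxiliary_device_uninit() and auxiliary_driver_register() are assumed to be the KBUILD_MODNAME wrappers from the matching include/linux/auxiliary_bus.h header, which sits outside this drivers/base diffstat.

/*
 * Illustrative sketch only: the "foo" names are not part of this patch.
 * Parent-driver side: two-step registration as documented in the
 * auxiliary_device_init()/__auxiliary_device_add() kernel-doc below.
 */
#include <linux/auxiliary_bus.h>
#include <linux/slab.h>

static void foo_adev_release(struct device *dev)
{
	/* all post-init memory clean-up is expected to happen here */
	kfree(to_auxiliary_dev(dev));
}

static int foo_add_aux_device(struct device *parent)
{
	struct auxiliary_device *adev;
	int ret;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->name = "rdma";			/* device ends up named "<modname>.rdma.0" */
	adev->id = 0;
	adev->dev.parent = parent;
	adev->dev.release = foo_adev_release;

	ret = auxiliary_device_init(adev);	/* step 1: checks + device_initialize() */
	if (ret) {
		kfree(adev);			/* pre-init failure: free directly */
		return ret;
	}

	ret = auxiliary_device_add(adev);	/* step 2: dev_set_name() + device_add() */
	if (ret)
		auxiliary_device_uninit(adev);	/* post-init failure: .release cleans up */

	return ret;
}

/* Sub-driver side: id_table names are "<parent KBUILD_MODNAME>.<name>". */
static const struct auxiliary_device_id foo_aux_id_table[] = {
	{ .name = "foo_parent.rdma" },
	{}
};

static int foo_aux_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	return 0;
}

static struct auxiliary_driver foo_aux_driver = {
	.probe = foo_aux_probe,
	.id_table = foo_aux_id_table,
};
/* paired auxiliary_driver_register(&foo_aux_driver) /
 * auxiliary_driver_unregister(&foo_aux_driver) in module init/exit */

Note that dev_set_name() in __auxiliary_device_add() produces "<modname>.<name>.<id>", and auxiliary_match_id() strips the trailing ".<id>" before comparing against id_table entries, which is why the id_table name above carries the parent module's name.
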
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 8d7001712062..040be48ce046 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -1,6 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 menu "Generic Driver Options" +config AUXILIARY_BUS + bool + config UEVENT_HELPER bool "Support for uevent helper" help diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 41369fc7004f..5e7bf9669a81 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -7,6 +7,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \ attribute_container.o transport_class.o \ topology.o container.o property.o cacheinfo.o \ swnode.o +obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o obj-y += power/ obj-$(CONFIG_ISA_BUS_API) += isa.o diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c new file mode 100644 index 000000000000..8336535f1e11 --- /dev/null +++ b/drivers/base/auxiliary.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020 Intel Corporation + * + * Please see Documentation/driver-api/auxiliary_bus.rst for more information. + */ + +#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/string.h> +#include <linux/auxiliary_bus.h> + +static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, + const struct auxiliary_device *auxdev) +{ + for (; id->name[0]; id++) { + const char *p = strrchr(dev_name(&auxdev->dev), '.'); + int match_size; + + if (!p) + continue; + match_size = p - dev_name(&auxdev->dev); + + /* use dev_name(&auxdev->dev) prefix before last '.' 
char to match to */ + if (strlen(id->name) == match_size && + !strncmp(dev_name(&auxdev->dev), id->name, match_size)) + return id; + } + return NULL; +} + +static int auxiliary_match(struct device *dev, struct device_driver *drv) +{ + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv); + + return !!auxiliary_match_id(auxdrv->id_table, auxdev); +} + +static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + const char *name, *p; + + name = dev_name(dev); + p = strrchr(name, '.'); + + return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX, + (int)(p - name), name); +} + +static const struct dev_pm_ops auxiliary_dev_pm_ops = { + SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) +}; + +static int auxiliary_bus_probe(struct device *dev) +{ + struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + int ret; + + ret = dev_pm_domain_attach(dev, true); + if (ret) { + dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret); + return ret; + } + + ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev)); + if (ret) + dev_pm_domain_detach(dev, true); + + return ret; +} + +static int auxiliary_bus_remove(struct device *dev) +{ + struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + + if (auxdrv->remove) + auxdrv->remove(auxdev); + dev_pm_domain_detach(dev, true); + + return 0; +} + +static void auxiliary_bus_shutdown(struct device *dev) +{ + struct auxiliary_driver *auxdrv = NULL; + struct auxiliary_device *auxdev; + + if (dev->driver) { + auxdrv = to_auxiliary_drv(dev->driver); + auxdev = to_auxiliary_dev(dev); + } + + if (auxdrv && auxdrv->shutdown) + auxdrv->shutdown(auxdev); +} + +static struct bus_type auxiliary_bus_type = { + .name = "auxiliary", + .probe = auxiliary_bus_probe, + .remove = auxiliary_bus_remove, + .shutdown = auxiliary_bus_shutdown, + .match = auxiliary_match, + .uevent = auxiliary_uevent, + .pm = &auxiliary_dev_pm_ops, +}; + +/** + * auxiliary_device_init - check auxiliary_device and initialize + * @auxdev: auxiliary device struct + * + * This is the first step in the two-step process to register an + * auxiliary_device. + * + * When this function returns an error code, then the device_initialize will + * *not* have been performed, and the caller will be responsible to free any + * memory allocated for the auxiliary_device in the error path directly. + * + * It returns 0 on success. On success, the device_initialize has been + * performed. After this point any error unwinding will need to include a call + * to auxiliary_device_uninit(). In this post-initialize error scenario, a call + * to the device's .release callback will be triggered, and all memory clean-up + * is expected to be handled there. 
+ */ +int auxiliary_device_init(struct auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + + if (!dev->parent) { + pr_err("auxiliary_device has a NULL dev->parent\n"); + return -EINVAL; + } + + if (!auxdev->name) { + pr_err("auxiliary_device has a NULL name\n"); + return -EINVAL; + } + + dev->bus = &auxiliary_bus_type; + device_initialize(&auxdev->dev); + return 0; +} +EXPORT_SYMBOL_GPL(auxiliary_device_init); + +/** + * __auxiliary_device_add - add an auxiliary bus device + * @auxdev: auxiliary bus device to add to the bus + * @modname: name of the parent device's driver module + * + * This is the second step in the two-step process to register an + * auxiliary_device. + * + * This function must be called after a successful call to + * auxiliary_device_init(), which will perform the device_initialize. This + * means that if this returns an error code, then a call to + * auxiliary_device_uninit() must be performed so that the .release callback + * will be triggered to free the memory associated with the auxiliary_device. + * + * The expectation is that users will call the "auxiliary_device_add" macro so + * that the caller's KBUILD_MODNAME is automatically inserted for the modname + * parameter. Only if a user requires a custom name would this version be + * called directly. + */ +int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname) +{ + struct device *dev = &auxdev->dev; + int ret; + + if (!modname) { + dev_err(dev, "auxiliary device modname is NULL\n"); + return -EINVAL; + } + + ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id); + if (ret) { + dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret); + return ret; + } + + ret = device_add(dev); + if (ret) + dev_err(dev, "adding auxiliary device failed!: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(__auxiliary_device_add); + +/** + * auxiliary_find_device - auxiliary device iterator for locating a particular device. + * @start: Device to begin with + * @data: Data to pass to match function + * @match: Callback function to check device + * + * This function returns a reference to a device that is 'found' + * for later use, as determined by the @match callback. + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero, this function will + * return to the caller and not iterate over any more devices. 
+ */ +struct auxiliary_device *auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct device *dev; + + dev = bus_find_device(&auxiliary_bus_type, start, data, match); + if (!dev) + return NULL; + + return to_auxiliary_dev(dev); +} +EXPORT_SYMBOL_GPL(auxiliary_find_device); + +/** + * __auxiliary_driver_register - register a driver for auxiliary bus devices + * @auxdrv: auxiliary_driver structure + * @owner: owning module/driver + * @modname: KBUILD_MODNAME for parent driver + */ +int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, + struct module *owner, const char *modname) +{ + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) + return -EINVAL; + + if (auxdrv->name) + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname, + auxdrv->name); + else + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname); + if (!auxdrv->driver.name) + return -ENOMEM; + + auxdrv->driver.owner = owner; + auxdrv->driver.bus = &auxiliary_bus_type; + auxdrv->driver.mod_name = modname; + + return driver_register(&auxdrv->driver); +} +EXPORT_SYMBOL_GPL(__auxiliary_driver_register); + +/** + * auxiliary_driver_unregister - unregister a driver + * @auxdrv: auxiliary_driver structure + */ +void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) +{ + driver_unregister(&auxdrv->driver); + kfree(auxdrv->driver.name); +} +EXPORT_SYMBOL_GPL(auxiliary_driver_unregister); + +static int __init auxiliary_bus_init(void) +{ + return bus_register(&auxiliary_bus_type); +} + +static void __exit auxiliary_bus_exit(void) +{ + bus_unregister(&auxiliary_bus_type); +} + +module_init(auxiliary_bus_init); +module_exit(auxiliary_bus_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Auxiliary Bus"); +MODULE_AUTHOR("David Ertman <david.m.ertman@intel.com>"); +MODULE_AUTHOR("Kiran Patil <kiran.patil@intel.com>"); diff --git a/drivers/base/base.h b/drivers/base/base.h index 91cfb8405abd..f5600a83124f 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -133,7 +133,6 @@ extern void device_release_driver_internal(struct device *dev, struct device *parent); extern void driver_detach(struct device_driver *drv); -extern int driver_probe_device(struct device_driver *drv, struct device *dev); extern void driver_deferred_probe_del(struct device *dev); extern void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf); diff --git a/drivers/base/class.c b/drivers/base/class.c index c3451481194e..7476f393df97 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -210,7 +210,7 @@ static void class_create_release(struct class *cls) } /** - * class_create - create a struct class structure + * __class_create - create a struct class structure * @owner: pointer to the module that is to "own" this struct class * @name: pointer to a string for the name of this class. * @key: the lock_class_key for this class; used by mutex lock debugging diff --git a/drivers/base/core.c b/drivers/base/core.c index d661ada1518f..14f165816742 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -46,15 +46,108 @@ early_param("sysfs.deprecated", sysfs_deprecated_setup); #endif /* Device links support. 
*/ -static LIST_HEAD(wait_for_suppliers); -static DEFINE_MUTEX(wfs_lock); static LIST_HEAD(deferred_sync); static unsigned int defer_sync_state_count = 1; -static unsigned int defer_fw_devlink_count; -static LIST_HEAD(deferred_fw_devlink); -static DEFINE_MUTEX(defer_fw_devlink_lock); +static DEFINE_MUTEX(fwnode_link_lock); static bool fw_devlink_is_permissive(void); +/** + * fwnode_link_add - Create a link between two fwnode_handles. + * @con: Consumer end of the link. + * @sup: Supplier end of the link. + * + * Create a fwnode link between fwnode handles @con and @sup. The fwnode link + * represents the detail that the firmware lists @sup fwnode as supplying a + * resource to @con. + * + * The driver core will use the fwnode link to create a device link between the + * two device objects corresponding to @con and @sup when they are created. The + * driver core will automatically delete the fwnode link between @con and @sup + * after doing that. + * + * Attempts to create duplicate links between the same pair of fwnode handles + * are ignored and there is no reference counting. + */ +int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup) +{ + struct fwnode_link *link; + int ret = 0; + + mutex_lock(&fwnode_link_lock); + + list_for_each_entry(link, &sup->consumers, s_hook) + if (link->consumer == con) + goto out; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + ret = -ENOMEM; + goto out; + } + + link->supplier = sup; + INIT_LIST_HEAD(&link->s_hook); + link->consumer = con; + INIT_LIST_HEAD(&link->c_hook); + + list_add(&link->s_hook, &sup->consumers); + list_add(&link->c_hook, &con->suppliers); +out: + mutex_unlock(&fwnode_link_lock); + + return ret; +} + +/** + * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle. + * @fwnode: fwnode whose supplier links need to be deleted + * + * Deletes all supplier links connecting directly to @fwnode. + */ +static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode) +{ + struct fwnode_link *link, *tmp; + + mutex_lock(&fwnode_link_lock); + list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { + list_del(&link->s_hook); + list_del(&link->c_hook); + kfree(link); + } + mutex_unlock(&fwnode_link_lock); +} + +/** + * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle. + * @fwnode: fwnode whose consumer links need to be deleted + * + * Deletes all consumer links connecting directly to @fwnode. + */ +static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode) +{ + struct fwnode_link *link, *tmp; + + mutex_lock(&fwnode_link_lock); + list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { + list_del(&link->s_hook); + list_del(&link->c_hook); + kfree(link); + } + mutex_unlock(&fwnode_link_lock); +} + +/** + * fwnode_links_purge - Delete all links connected to a fwnode_handle. + * @fwnode: fwnode whose links needs to be deleted + * + * Deletes all links connecting directly to a fwnode. + */ +void fwnode_links_purge(struct fwnode_handle *fwnode) +{ + fwnode_links_purge_suppliers(fwnode); + fwnode_links_purge_consumers(fwnode); +} + #ifdef CONFIG_SRCU static DEFINE_MUTEX(device_links_lock); DEFINE_STATIC_SRCU(device_links_srcu); @@ -468,7 +561,7 @@ postcore_initcall(devlink_class_init); * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the * runtime PM framework to take the link into account. 
Second, if the * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will - * be forced into the active metastate and reference-counted upon the creation + * be forced into the active meta state and reference-counted upon the creation * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be * ignored. * @@ -491,7 +584,7 @@ postcore_initcall(devlink_class_init); * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can - * be used to request the driver core to automaticall probe for a consmer + * be used to request the driver core to automatically probe for a consumer * driver after successfully binding a driver to the supplier device. * * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER, @@ -556,6 +649,17 @@ struct device_link *device_link_add(struct device *consumer, } /* + * SYNC_STATE_ONLY links are useless once a consumer device has probed. + * So, only create it if the consumer hasn't probed yet. + */ + if (flags & DL_FLAG_SYNC_STATE_ONLY && + consumer->links.status != DL_DEV_NO_DRIVER && + consumer->links.status != DL_DEV_PROBING) { + link = NULL; + goto out; + } + + /* * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. @@ -697,74 +801,6 @@ out: } EXPORT_SYMBOL_GPL(device_link_add); -/** - * device_link_wait_for_supplier - Add device to wait_for_suppliers list - * @consumer: Consumer device - * - * Marks the @consumer device as waiting for suppliers to become available by - * adding it to the wait_for_suppliers list. The consumer device will never be - * probed until it's removed from the wait_for_suppliers list. - * - * The caller is responsible for adding the links to the supplier devices once - * they are available and removing the @consumer device from the - * wait_for_suppliers list once links to all the suppliers have been created. - * - * This function is NOT meant to be called from the probe function of the - * consumer but rather from code that creates/adds the consumer device. - */ -static void device_link_wait_for_supplier(struct device *consumer, - bool need_for_probe) -{ - mutex_lock(&wfs_lock); - list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers); - consumer->links.need_for_probe = need_for_probe; - mutex_unlock(&wfs_lock); -} - -static void device_link_wait_for_mandatory_supplier(struct device *consumer) -{ - device_link_wait_for_supplier(consumer, true); -} - -static void device_link_wait_for_optional_supplier(struct device *consumer) -{ - device_link_wait_for_supplier(consumer, false); -} - -/** - * device_link_add_missing_supplier_links - Add links from consumer devices to - * supplier devices, leaving any - * consumer with inactive suppliers on - * the wait_for_suppliers list - * - * Loops through all consumers waiting on suppliers and tries to add all their - * supplier links. If that succeeds, the consumer device is removed from - * wait_for_suppliers list. Otherwise, they are left in the wait_for_suppliers - * list. Devices left on the wait_for_suppliers list will not be probed. - * - * The fwnode add_links callback is expected to return 0 if it has found and - * added all the supplier links for the consumer device. It should return an - * error if it isn't able to do so. 
- * - * The caller of device_link_wait_for_supplier() is expected to call this once - * it's aware of potential suppliers becoming available. - */ -static void device_link_add_missing_supplier_links(void) -{ - struct device *dev, *tmp; - - mutex_lock(&wfs_lock); - list_for_each_entry_safe(dev, tmp, &wait_for_suppliers, - links.needs_suppliers) { - int ret = fwnode_call_int_op(dev->fwnode, add_links, dev); - if (!ret) - list_del_init(&dev->links.needs_suppliers); - else if (ret != -ENODEV || fw_devlink_is_permissive()) - dev->links.need_for_probe = false; - } - mutex_unlock(&wfs_lock); -} - #ifdef CONFIG_SRCU static void __device_link_del(struct kref *kref) { @@ -890,13 +926,13 @@ int device_links_check_suppliers(struct device *dev) * Device waiting for supplier to become available is not allowed to * probe. */ - mutex_lock(&wfs_lock); - if (!list_empty(&dev->links.needs_suppliers) && - dev->links.need_for_probe) { - mutex_unlock(&wfs_lock); + mutex_lock(&fwnode_link_lock); + if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) && + !fw_devlink_is_permissive()) { + mutex_unlock(&fwnode_link_lock); return -EPROBE_DEFER; } - mutex_unlock(&wfs_lock); + mutex_unlock(&fwnode_link_lock); device_links_write_lock(); @@ -960,11 +996,11 @@ static void __device_links_queue_sync_state(struct device *dev, */ dev->state_synced = true; - if (WARN_ON(!list_empty(&dev->links.defer_hook))) + if (WARN_ON(!list_empty(&dev->links.defer_sync))) return; get_device(dev); - list_add_tail(&dev->links.defer_hook, list); + list_add_tail(&dev->links.defer_sync, list); } /** @@ -982,8 +1018,8 @@ static void device_links_flush_sync_list(struct list_head *list, { struct device *dev, *tmp; - list_for_each_entry_safe(dev, tmp, list, links.defer_hook) { - list_del_init(&dev->links.defer_hook); + list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { + list_del_init(&dev->links.defer_sync); if (dev != dont_lock_dev) device_lock(dev); @@ -1021,12 +1057,12 @@ void device_links_supplier_sync_state_resume(void) if (defer_sync_state_count) goto out; - list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) { + list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { /* * Delete from deferred_sync list before queuing it to - * sync_list because defer_hook is used for both lists. + * sync_list because defer_sync is used for both lists. */ - list_del_init(&dev->links.defer_hook); + list_del_init(&dev->links.defer_sync); __device_links_queue_sync_state(dev, &sync_list); } out: @@ -1044,8 +1080,8 @@ late_initcall(sync_state_resume_initcall); static void __device_links_supplier_defer_sync(struct device *sup) { - if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup)) - list_add_tail(&sup->links.defer_hook, &deferred_sync); + if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) + list_add_tail(&sup->links.defer_sync, &deferred_sync); } static void device_link_drop_managed(struct device_link *link) @@ -1062,10 +1098,7 @@ static ssize_t waiting_for_supplier_show(struct device *dev, bool val; device_lock(dev); - mutex_lock(&wfs_lock); - val = !list_empty(&dev->links.needs_suppliers) - && dev->links.need_for_probe; - mutex_unlock(&wfs_lock); + val = !list_empty(&dev->fwnode->suppliers); device_unlock(dev); return sysfs_emit(buf, "%u\n", val); } @@ -1092,9 +1125,8 @@ void device_links_driver_bound(struct device *dev) * the device links it needs to or make new device links as it needs * them. So, it no longer needs to wait on any suppliers. 
*/ - mutex_lock(&wfs_lock); - list_del_init(&dev->links.needs_suppliers); - mutex_unlock(&wfs_lock); + if (dev->fwnode && dev->fwnode->dev == dev) + fwnode_links_purge_suppliers(dev->fwnode); device_remove_file(dev, &dev_attr_waiting_for_supplier); device_links_write_lock(); @@ -1275,7 +1307,7 @@ void device_links_driver_cleanup(struct device *dev) WRITE_ONCE(link->status, DL_STATE_DORMANT); } - list_del_init(&dev->links.defer_hook); + list_del_init(&dev->links.defer_sync); __device_links_no_driver(dev); device_links_write_unlock(); @@ -1385,10 +1417,6 @@ static void device_links_purge(struct device *dev) if (dev->class == &devlink_class) return; - mutex_lock(&wfs_lock); - list_del(&dev->links.needs_suppliers); - mutex_unlock(&wfs_lock); - /* * Delete all of the remaining links from this device to any other * devices (either consumers or suppliers). @@ -1439,139 +1467,267 @@ static bool fw_devlink_is_permissive(void) return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY; } -static void fw_devlink_link_device(struct device *dev) +static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode) { - int fw_ret; - - if (!fw_devlink_flags) + if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED) return; - mutex_lock(&defer_fw_devlink_lock); - if (!defer_fw_devlink_count) - device_link_add_missing_supplier_links(); + fwnode_call_int_op(fwnode, add_links); + fwnode->flags |= FWNODE_FLAG_LINKS_ADDED; +} + +static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *child = NULL; + + fw_devlink_parse_fwnode(fwnode); + + while ((child = fwnode_get_next_available_child_node(fwnode, child))) + fw_devlink_parse_fwtree(child); +} + +/** + * fw_devlink_create_devlink - Create a device link from a consumer to fwnode + * @con - Consumer device for the device link + * @sup_handle - fwnode handle of supplier + * + * This function will try to create a device link between the consumer device + * @con and the supplier device represented by @sup_handle. + * + * The supplier has to be provided as a fwnode because incorrect cycles in + * fwnode links can sometimes cause the supplier device to never be created. + * This function detects such cases and returns an error if it cannot create a + * device link from the consumer to a missing supplier. + * + * Returns, + * 0 on successfully creating a device link + * -EINVAL if the device link cannot be created as expected + * -EAGAIN if the device link cannot be created right now, but it may be + * possible to do that in the future + */ +static int fw_devlink_create_devlink(struct device *con, + struct fwnode_handle *sup_handle, u32 flags) +{ + struct device *sup_dev; + int ret = 0; + + sup_dev = get_dev_from_fwnode(sup_handle); + if (sup_dev) { + /* + * If this fails, it is due to cycles in device links. Just + * give up on this link and treat it as invalid. + */ + if (!device_link_add(con, sup_dev, flags)) + ret = -EINVAL; + + goto out; + } /* - * The device's fwnode not having add_links() doesn't affect if other - * consumers can find this device as a supplier. So, this check is - * intentionally placed after device_link_add_missing_supplier_links(). + * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports + * cycles. So cycle detection isn't necessary and shouldn't be + * done. */ - if (!fwnode_has_op(dev->fwnode, add_links)) - goto out; + if (flags & DL_FLAG_SYNC_STATE_ONLY) + return -EAGAIN; /* - * If fw_devlink is being deferred, assume all devices have mandatory - * suppliers they need to link to later. 
Then, when the fw_devlink is - * resumed, all these devices will get a chance to try and link to any - * suppliers they have. + * If we can't find the supplier device from its fwnode, it might be + * due to a cyclic dependency between fwnodes. Some of these cycles can + * be broken by applying logic. Check for these types of cycles and + * break them so that devices in the cycle probe properly. + * + * If the supplier's parent is dependent on the consumer, then + * the consumer-supplier dependency is a false dependency. So, + * treat it as an invalid link. */ - if (!defer_fw_devlink_count) { - fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev); - if (fw_ret == -ENODEV && fw_devlink_is_permissive()) - fw_ret = -EAGAIN; + sup_dev = fwnode_get_next_parent_dev(sup_handle); + if (sup_dev && device_is_dependent(con, sup_dev)) { + dev_dbg(con, "Not linking to %pfwP - False link\n", + sup_handle); + ret = -EINVAL; } else { - fw_ret = -ENODEV; /* - * defer_hook is not used to add device to deferred_sync list - * until device is bound. Since deferred fw devlink also blocks - * probing, same list hook can be used for deferred_fw_devlink. + * Can't check for cycles or no cycles. So let's try + * again later. */ - list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink); + ret = -EAGAIN; } - if (fw_ret == -ENODEV) - device_link_wait_for_mandatory_supplier(dev); - else if (fw_ret) - device_link_wait_for_optional_supplier(dev); - out: - mutex_unlock(&defer_fw_devlink_lock); + put_device(sup_dev); + return ret; } /** - * fw_devlink_pause - Pause parsing of fwnode to create device links - * - * Calling this function defers any fwnode parsing to create device links until - * fw_devlink_resume() is called. Both these functions are ref counted and the - * caller needs to match the calls. - * - * While fw_devlink is paused: - * - Any device that is added won't have its fwnode parsed to create device - * links. - * - The probe of the device will also be deferred during this period. - * - Any devices that were already added, but waiting for suppliers won't be - * able to link to newly added devices. - * - * Once fw_devlink_resume(): - * - All the fwnodes that was not parsed will be parsed. - * - All the devices that were deferred probing will be reattempted if they - * aren't waiting for any more suppliers. - * - * This pair of functions, is mainly meant to optimize the parsing of fwnodes - * when a lot of devices that need to link to each other are added in a short - * interval of time. For example, adding all the top level devices in a system. + * __fw_devlink_link_to_consumers - Create device links to consumers of a device + * @dev - Device that needs to be linked to its consumers * - * For example, if N devices are added and: - * - All the consumers are added before their suppliers - * - All the suppliers of the N devices are part of the N devices + * This function looks at all the consumer fwnodes of @dev and creates device + * links between the consumer device and @dev (supplier). * - * Then: + * If the consumer device has not been added yet, then this function creates a + * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device + * of the consumer fwnode. This is necessary to make sure @dev doesn't get a + * sync_state() callback before the real consumer device gets to be added and + * then probed. 
* - * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device - * will only need one parsing of its fwnode because it is guaranteed to find - * all the supplier devices already registered and ready to link to. It won't - * have to do another pass later to find one or more suppliers it couldn't - * find in the first parse of the fwnode. So, we'll only need O(N) fwnode - * parses. - * - * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would - * end up doing O(N^2) parses of fwnodes because every device that's added is - * guaranteed to trigger a parse of the fwnode of every device added before - * it. This O(N^2) parse is made worse by the fact that when a fwnode of a - * device is parsed, all it descendant devices might need to have their - * fwnodes parsed too (even if the devices themselves aren't added). + * Once device links are created from the real consumer to @dev (supplier), the + * fwnode links are deleted. */ -void fw_devlink_pause(void) +static void __fw_devlink_link_to_consumers(struct device *dev) { - mutex_lock(&defer_fw_devlink_lock); - defer_fw_devlink_count++; - mutex_unlock(&defer_fw_devlink_lock); + struct fwnode_handle *fwnode = dev->fwnode; + struct fwnode_link *link, *tmp; + + list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { + u32 dl_flags = fw_devlink_get_flags(); + struct device *con_dev; + bool own_link = true; + int ret; + + con_dev = get_dev_from_fwnode(link->consumer); + /* + * If consumer device is not available yet, make a "proxy" + * SYNC_STATE_ONLY link from the consumer's parent device to + * the supplier device. This is necessary to make sure the + * supplier doesn't get a sync_state() callback before the real + * consumer can create a device link to the supplier. + * + * This proxy link step is needed to handle the case where the + * consumer's parent device is added before the supplier. + */ + if (!con_dev) { + con_dev = fwnode_get_next_parent_dev(link->consumer); + /* + * However, if the consumer's parent device is also the + * parent of the supplier, don't create a + * consumer-supplier link from the parent to its child + * device. Such a dependency is impossible. + */ + if (con_dev && + fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) { + put_device(con_dev); + con_dev = NULL; + } else { + own_link = false; + dl_flags = DL_FLAG_SYNC_STATE_ONLY; + } + } + + if (!con_dev) + continue; + + ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags); + put_device(con_dev); + if (!own_link || ret == -EAGAIN) + continue; + + list_del(&link->s_hook); + list_del(&link->c_hook); + kfree(link); + } } -/** fw_devlink_resume - Resume parsing of fwnode to create device links +/** + * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device + * @dev - The consumer device that needs to be linked to its suppliers + * @fwnode - Root of the fwnode tree that is used to create device links + * + * This function looks at all the supplier fwnodes of fwnode tree rooted at + * @fwnode and creates device links between @dev (consumer) and all the + * supplier devices of the entire fwnode tree at @fwnode. * - * This function is used in conjunction with fw_devlink_pause() and is ref - * counted. See documentation for fw_devlink_pause() for more details. + * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev + * and the real suppliers of @dev. Once these device links are created, the + * fwnode links are deleted. 
When such device links are successfully created, + * this function is called recursively on those supplier devices. This is + * needed to detect and break some invalid cycles in fwnode links. See + * fw_devlink_create_devlink() for more details. + * + * In addition, it also looks at all the suppliers of the entire fwnode tree + * because some of the child devices of @dev that have not been added yet + * (because @dev hasn't probed) might already have their suppliers added to + * driver core. So, this function creates SYNC_STATE_ONLY device links between + * @dev (consumer) and these suppliers to make sure they don't execute their + * sync_state() callbacks before these child devices have a chance to create + * their device links. The fwnode links that correspond to the child devices + * aren't delete because they are needed later to create the device links + * between the real consumer and supplier devices. */ -void fw_devlink_resume(void) +static void __fw_devlink_link_to_suppliers(struct device *dev, + struct fwnode_handle *fwnode) { - struct device *dev, *tmp; - LIST_HEAD(probe_list); + bool own_link = (dev->fwnode == fwnode); + struct fwnode_link *link, *tmp; + struct fwnode_handle *child = NULL; + u32 dl_flags; - mutex_lock(&defer_fw_devlink_lock); - if (!defer_fw_devlink_count) { - WARN(true, "Unmatched fw_devlink pause/resume!"); - goto out; - } + if (own_link) + dl_flags = fw_devlink_get_flags(); + else + dl_flags = DL_FLAG_SYNC_STATE_ONLY; - defer_fw_devlink_count--; - if (defer_fw_devlink_count) - goto out; + list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { + int ret; + struct device *sup_dev; + struct fwnode_handle *sup = link->supplier; - device_link_add_missing_supplier_links(); - list_splice_tail_init(&deferred_fw_devlink, &probe_list); -out: - mutex_unlock(&defer_fw_devlink_lock); + ret = fw_devlink_create_devlink(dev, sup, dl_flags); + if (!own_link || ret == -EAGAIN) + continue; + + list_del(&link->s_hook); + list_del(&link->c_hook); + kfree(link); + + /* If no device link was created, nothing more to do. */ + if (ret) + continue; + + /* + * If a device link was successfully created to a supplier, we + * now need to try and link the supplier to all its suppliers. + * + * This is needed to detect and delete false dependencies in + * fwnode links that haven't been converted to a device link + * yet. See comments in fw_devlink_create_devlink() for more + * details on the false dependency. + * + * Without deleting these false dependencies, some devices will + * never probe because they'll keep waiting for their false + * dependency fwnode links to be converted to device links. + */ + sup_dev = get_dev_from_fwnode(sup); + __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode); + put_device(sup_dev); + } /* - * bus_probe_device() can cause new devices to get added and they'll - * try to grab defer_fw_devlink_lock. So, this needs to be done outside - * the defer_fw_devlink_lock. + * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of + * all the descendants. This proxy link step is needed to handle the + * case where the supplier is added before the consumer's parent device + * (@dev). 
*/ - list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) { - list_del_init(&dev->links.defer_hook); - bus_probe_device(dev); - } + while ((child = fwnode_get_next_available_child_node(fwnode, child))) + __fw_devlink_link_to_suppliers(dev, child); } + +static void fw_devlink_link_device(struct device *dev) +{ + struct fwnode_handle *fwnode = dev->fwnode; + + if (!fw_devlink_flags) + return; + + fw_devlink_parse_fwtree(fwnode); + + mutex_lock(&fwnode_link_lock); + __fw_devlink_link_to_consumers(dev); + __fw_devlink_link_to_suppliers(dev, fwnode); + mutex_unlock(&fwnode_link_lock); +} + /* Device links support end. */ int (*platform_notify)(struct device *dev) = NULL; @@ -2196,7 +2352,7 @@ static int device_add_attrs(struct device *dev) goto err_remove_dev_groups; } - if (fw_devlink_flags && !fw_devlink_is_permissive()) { + if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) { error = device_create_file(dev, &dev_attr_waiting_for_supplier); if (error) goto err_remove_dev_online; @@ -2427,8 +2583,7 @@ void device_initialize(struct device *dev) #endif INIT_LIST_HEAD(&dev->links.consumers); INIT_LIST_HEAD(&dev->links.suppliers); - INIT_LIST_HEAD(&dev->links.needs_suppliers); - INIT_LIST_HEAD(&dev->links.defer_hook); + INIT_LIST_HEAD(&dev->links.defer_sync); dev->links.status = DL_DEV_NO_DRIVER; } EXPORT_SYMBOL_GPL(device_initialize); @@ -4259,6 +4414,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) * * Set the device's firmware node pointer to @fwnode, but if a secondary * firmware node of the device is present, preserve it. + * + * Valid fwnode cases are: + * - primary --> secondary --> -ENODEV + * - primary --> NULL + * - secondary --> -ENODEV + * - NULL */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { @@ -4277,8 +4438,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } else { if (fwnode_is_primary(fn)) { dev->fwnode = fn->secondary; + /* Set fn->secondary = NULL, so fn remains the primary fwnode */ if (!(parent && fn == parent->fwnode)) - fn->secondary = ERR_PTR(-ENODEV); + fn->secondary = NULL; } else { dev->fwnode = NULL; } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 148e81969e04..2f32f38a11ed 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -371,6 +371,13 @@ static void driver_bound(struct device *dev) device_pm_check_callbacks(dev); /* + * Reorder successfully probed devices to the end of the device list. + * This ensures that suspend/resume order matches probe order, which + * is usually what drivers rely on. + */ + device_pm_move_to_tail(dev); + + /* * Make sure the device is no longer in one of the deferred lists and * kick off retrying all pending devices */ @@ -717,7 +724,7 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe); * * If the device has a parent, runtime-resume the parent before driver probing. 
*/ -int driver_probe_device(struct device_driver *drv, struct device *dev) +static int driver_probe_device(struct device_driver *drv, struct device *dev) { int ret = 0; diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 586e9a75c840..fb9d5289a620 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -149,7 +149,7 @@ void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid EXPORT_SYMBOL_GPL(__devres_alloc_node); #else /** - * devres_alloc - Allocate device resource data + * devres_alloc_node - Allocate device resource data * @release: Release function devres will be associated with * @size: Allocation size * @gfp: Allocation flags diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 4dec4b79ae06..91899d185e31 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -128,7 +128,7 @@ static ssize_t timeout_show(struct class *class, struct class_attribute *attr, } /** - * firmware_timeout_store() - set number of seconds to wait for firmware + * timeout_store() - set number of seconds to wait for firmware * @class: device class pointer * @attr: device attribute pointer * @buf: buffer to scan for timeout value diff --git a/drivers/base/node.c b/drivers/base/node.c index 6ffa470e2984..04f71c7bc3f8 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -450,7 +450,7 @@ static ssize_t node_read_meminfo(struct device *dev, #ifdef CONFIG_SHADOW_CALL_STACK nid, node_page_state(pgdat, NR_KERNEL_SCS_KB), #endif - nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), + nid, K(node_page_state(pgdat, NR_PAGETABLE)), nid, 0UL, nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 74c97b65048c..8456d8384ac8 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -65,6 +65,21 @@ struct resource *platform_get_resource(struct platform_device *dev, } EXPORT_SYMBOL_GPL(platform_get_resource); +struct resource *platform_get_mem_or_io(struct platform_device *dev, + unsigned int num) +{ + u32 i; + + for (i = 0; i < dev->num_resources; i++) { + struct resource *r = &dev->resource[i]; + + if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) + return r; + } + return NULL; +} +EXPORT_SYMBOL_GPL(platform_get_mem_or_io); + #ifdef CONFIG_HAS_IOMEM /** * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a @@ -866,62 +881,6 @@ err: } EXPORT_SYMBOL_GPL(platform_device_register_full); -static int platform_drv_probe(struct device *_dev) -{ - struct platform_driver *drv = to_platform_driver(_dev->driver); - struct platform_device *dev = to_platform_device(_dev); - int ret; - - ret = of_clk_set_defaults(_dev->of_node, false); - if (ret < 0) - return ret; - - ret = dev_pm_domain_attach(_dev, true); - if (ret) - goto out; - - if (drv->probe) { - ret = drv->probe(dev); - if (ret) - dev_pm_domain_detach(_dev, true); - } - -out: - if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { - dev_warn(_dev, "probe deferral not supported\n"); - ret = -ENXIO; - } - - return ret; -} - -static int platform_drv_probe_fail(struct device *_dev) -{ - return -ENXIO; -} - -static int platform_drv_remove(struct device *_dev) -{ - struct platform_driver *drv = to_platform_driver(_dev->driver); - struct platform_device *dev = to_platform_device(_dev); - int ret = 0; - - if (drv->remove) - ret = drv->remove(dev); - 
dev_pm_domain_detach(_dev, true); - - return ret; -} - -static void platform_drv_shutdown(struct device *_dev) -{ - struct platform_driver *drv = to_platform_driver(_dev->driver); - struct platform_device *dev = to_platform_device(_dev); - - if (drv->shutdown) - drv->shutdown(dev); -} - /** * __platform_driver_register - register a driver for platform-level devices * @drv: platform driver structure @@ -932,9 +891,6 @@ int __platform_driver_register(struct platform_driver *drv, { drv->driver.owner = owner; drv->driver.bus = &platform_bus_type; - drv->driver.probe = platform_drv_probe; - drv->driver.remove = platform_drv_remove; - drv->driver.shutdown = platform_drv_shutdown; return driver_register(&drv->driver); } @@ -950,6 +906,11 @@ void platform_driver_unregister(struct platform_driver *drv) } EXPORT_SYMBOL_GPL(platform_driver_unregister); +static int platform_probe_fail(struct platform_device *pdev) +{ + return -ENXIO; +} + /** * __platform_driver_probe - register driver for non-hotpluggable device * @drv: platform driver structure @@ -1010,10 +971,9 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, * new devices fail. */ spin_lock(&drv->driver.bus->p->klist_drivers.k_lock); - drv->probe = NULL; + drv->probe = platform_probe_fail; if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) retval = -ENODEV; - drv->driver.probe = platform_drv_probe_fail; spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock); if (code != retval) @@ -1140,129 +1100,6 @@ void platform_unregister_drivers(struct platform_driver * const *drivers, } EXPORT_SYMBOL_GPL(platform_unregister_drivers); -/* modalias support enables more hands-off userspace setup: - * (a) environment variable lets new-style hotplug events work once system is - * fully running: "modprobe $MODALIAS" - * (b) sysfs attribute lets new-style coldplug recover from hotplug events - * mishandled before system is fully running: "modprobe $(cat modalias)" - */ -static ssize_t modalias_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - int len; - - len = of_device_modalias(dev, buf, PAGE_SIZE); - if (len != -ENODEV) - return len; - - len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); - if (len != -ENODEV) - return len; - - return sysfs_emit(buf, "platform:%s\n", pdev->name); -} -static DEVICE_ATTR_RO(modalias); - -static ssize_t driver_override_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct platform_device *pdev = to_platform_device(dev); - char *driver_override, *old, *cp; - - /* We need to keep extra room for a newline */ - if (count >= (PAGE_SIZE - 1)) - return -EINVAL; - - driver_override = kstrndup(buf, count, GFP_KERNEL); - if (!driver_override) - return -ENOMEM; - - cp = strchr(driver_override, '\n'); - if (cp) - *cp = '\0'; - - device_lock(dev); - old = pdev->driver_override; - if (strlen(driver_override)) { - pdev->driver_override = driver_override; - } else { - kfree(driver_override); - pdev->driver_override = NULL; - } - device_unlock(dev); - - kfree(old); - - return count; -} - -static ssize_t driver_override_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - ssize_t len; - - device_lock(dev); - len = sysfs_emit(buf, "%s\n", pdev->driver_override); - device_unlock(dev); - - return len; -} -static DEVICE_ATTR_RW(driver_override); - -static ssize_t numa_node_show(struct device *dev, - 
struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "%d\n", dev_to_node(dev)); -} -static DEVICE_ATTR_RO(numa_node); - -static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, - int n) -{ - struct device *dev = container_of(kobj, typeof(*dev), kobj); - - if (a == &dev_attr_numa_node.attr && - dev_to_node(dev) == NUMA_NO_NODE) - return 0; - - return a->mode; -} - -static struct attribute *platform_dev_attrs[] = { - &dev_attr_modalias.attr, - &dev_attr_numa_node.attr, - &dev_attr_driver_override.attr, - NULL, -}; - -static struct attribute_group platform_dev_group = { - .attrs = platform_dev_attrs, - .is_visible = platform_dev_attrs_visible, -}; -__ATTRIBUTE_GROUPS(platform_dev); - -static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - struct platform_device *pdev = to_platform_device(dev); - int rc; - - /* Some devices have extra OF data and an OF-style MODALIAS */ - rc = of_device_uevent_modalias(dev, env); - if (rc != -ENODEV) - return rc; - - rc = acpi_device_uevent_modalias(dev, env); - if (rc != -ENODEV) - return rc; - - add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, - pdev->name); - return 0; -} - static const struct platform_device_id *platform_match_id( const struct platform_device_id *id, struct platform_device *pdev) @@ -1277,44 +1114,6 @@ static const struct platform_device_id *platform_match_id( return NULL; } -/** - * platform_match - bind platform device to platform driver. - * @dev: device. - * @drv: driver. - * - * Platform device IDs are assumed to be encoded like this: - * "<name><instance>", where <name> is a short description of the type of - * device, like "pci" or "floppy", and <instance> is the enumerated - * instance of the device, like '0' or '42'. Driver IDs are simply - * "<name>". So, extract the <name> from the platform_device structure, - * and compare it against the name of the driver. Return whether they match - * or not. 
- */ -static int platform_match(struct device *dev, struct device_driver *drv) -{ - struct platform_device *pdev = to_platform_device(dev); - struct platform_driver *pdrv = to_platform_driver(drv); - - /* When driver_override is set, only bind to the matching driver */ - if (pdev->driver_override) - return !strcmp(pdev->driver_override, drv->name); - - /* Attempt an OF style match first */ - if (of_driver_match_device(dev, drv)) - return 1; - - /* Then try ACPI style match */ - if (acpi_driver_match_device(dev, drv)) - return 1; - - /* Then try to match against the id table */ - if (pdrv->id_table) - return platform_match_id(pdrv->id_table, pdev) != NULL; - - /* fall-back to driver name match */ - return (strcmp(pdev->name, drv->name) == 0); -} - #ifdef CONFIG_PM_SLEEP static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) @@ -1459,6 +1258,234 @@ int platform_pm_restore(struct device *dev) #endif /* CONFIG_HIBERNATE_CALLBACKS */ +/* modalias support enables more hands-off userspace setup: + * (a) environment variable lets new-style hotplug events work once system is + * fully running: "modprobe $MODALIAS" + * (b) sysfs attribute lets new-style coldplug recover from hotplug events + * mishandled before system is fully running: "modprobe $(cat modalias)" + */ +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + int len; + + len = of_device_modalias(dev, buf, PAGE_SIZE); + if (len != -ENODEV) + return len; + + len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); + if (len != -ENODEV) + return len; + + return sysfs_emit(buf, "platform:%s\n", pdev->name); +} +static DEVICE_ATTR_RO(modalias); + +static ssize_t numa_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", dev_to_node(dev)); +} +static DEVICE_ATTR_RO(numa_node); + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + ssize_t len; + + device_lock(dev); + len = sysfs_emit(buf, "%s\n", pdev->driver_override); + device_unlock(dev); + + return len; +} + +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + char *driver_override, *old, *cp; + + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); + if (!driver_override) + return -ENOMEM; + + cp = strchr(driver_override, '\n'); + if (cp) + *cp = '\0'; + + device_lock(dev); + old = pdev->driver_override; + if (strlen(driver_override)) { + pdev->driver_override = driver_override; + } else { + kfree(driver_override); + pdev->driver_override = NULL; + } + device_unlock(dev); + + kfree(old); + + return count; +} +static DEVICE_ATTR_RW(driver_override); + +static struct attribute *platform_dev_attrs[] = { + &dev_attr_modalias.attr, + &dev_attr_numa_node.attr, + &dev_attr_driver_override.attr, + NULL, +}; + +static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, + int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + + if (a == &dev_attr_numa_node.attr && + dev_to_node(dev) == NUMA_NO_NODE) + return 0; + + return a->mode; +} + +static struct attribute_group platform_dev_group = { + .attrs = platform_dev_attrs, + .is_visible = 
platform_dev_attrs_visible, +}; +__ATTRIBUTE_GROUPS(platform_dev); + + +/** + * platform_match - bind platform device to platform driver. + * @dev: device. + * @drv: driver. + * + * Platform device IDs are assumed to be encoded like this: + * "<name><instance>", where <name> is a short description of the type of + * device, like "pci" or "floppy", and <instance> is the enumerated + * instance of the device, like '0' or '42'. Driver IDs are simply + * "<name>". So, extract the <name> from the platform_device structure, + * and compare it against the name of the driver. Return whether they match + * or not. + */ +static int platform_match(struct device *dev, struct device_driver *drv) +{ + struct platform_device *pdev = to_platform_device(dev); + struct platform_driver *pdrv = to_platform_driver(drv); + + /* When driver_override is set, only bind to the matching driver */ + if (pdev->driver_override) + return !strcmp(pdev->driver_override, drv->name); + + /* Attempt an OF style match first */ + if (of_driver_match_device(dev, drv)) + return 1; + + /* Then try ACPI style match */ + if (acpi_driver_match_device(dev, drv)) + return 1; + + /* Then try to match against the id table */ + if (pdrv->id_table) + return platform_match_id(pdrv->id_table, pdev) != NULL; + + /* fall-back to driver name match */ + return (strcmp(pdev->name, drv->name) == 0); +} + +static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct platform_device *pdev = to_platform_device(dev); + int rc; + + /* Some devices have extra OF data and an OF-style MODALIAS */ + rc = of_device_uevent_modalias(dev, env); + if (rc != -ENODEV) + return rc; + + rc = acpi_device_uevent_modalias(dev, env); + if (rc != -ENODEV) + return rc; + + add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, + pdev->name); + return 0; +} + +static int platform_probe(struct device *_dev) +{ + struct platform_driver *drv = to_platform_driver(_dev->driver); + struct platform_device *dev = to_platform_device(_dev); + int ret; + + /* + * A driver registered using platform_driver_probe() cannot be bound + * again later because the probe function usually lives in __init code + * and so is gone. For these drivers .probe is set to + * platform_probe_fail in __platform_driver_probe(). Don't even prepare + * clocks and PM domains for these to match the traditional behaviour. 
+ */ + if (unlikely(drv->probe == platform_probe_fail)) + return -ENXIO; + + ret = of_clk_set_defaults(_dev->of_node, false); + if (ret < 0) + return ret; + + ret = dev_pm_domain_attach(_dev, true); + if (ret) + goto out; + + if (drv->probe) { + ret = drv->probe(dev); + if (ret) + dev_pm_domain_detach(_dev, true); + } + +out: + if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { + dev_warn(_dev, "probe deferral not supported\n"); + ret = -ENXIO; + } + + return ret; +} + +static int platform_remove(struct device *_dev) +{ + struct platform_driver *drv = to_platform_driver(_dev->driver); + struct platform_device *dev = to_platform_device(_dev); + int ret = 0; + + if (drv->remove) + ret = drv->remove(dev); + dev_pm_domain_detach(_dev, true); + + return ret; +} + +static void platform_shutdown(struct device *_dev) +{ + struct platform_device *dev = to_platform_device(_dev); + struct platform_driver *drv; + + if (!_dev->driver) + return; + + drv = to_platform_driver(_dev->driver); + if (drv->shutdown) + drv->shutdown(dev); +} + + int platform_dma_configure(struct device *dev) { enum dev_dma_attr attr; @@ -1485,6 +1512,9 @@ struct bus_type platform_bus_type = { .dev_groups = platform_dev_groups, .match = platform_match, .uevent = platform_uevent, + .probe = platform_probe, + .remove = platform_remove, + .shutdown = platform_shutdown, .dma_configure = platform_dma_configure, .pm = &platform_dev_pm_ops, }; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 743268996336..9a14eedacb92 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -21,6 +21,7 @@ #include <linux/suspend.h> #include <linux/export.h> #include <linux/cpu.h> +#include <linux/debugfs.h> #include "power.h" @@ -210,6 +211,18 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) } #ifdef CONFIG_DEBUG_FS +static struct dentry *genpd_debugfs_dir; + +static void genpd_debug_add(struct generic_pm_domain *genpd); + +static void genpd_debug_remove(struct generic_pm_domain *genpd) +{ + struct dentry *d; + + d = debugfs_lookup(genpd->name, genpd_debugfs_dir); + debugfs_remove(d); +} + static void genpd_update_accounting(struct generic_pm_domain *genpd) { ktime_t delta, now; @@ -234,6 +247,8 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd) genpd->accounting_time = now; } #else +static inline void genpd_debug_add(struct generic_pm_domain *genpd) {} +static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {} static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} #endif @@ -1142,7 +1157,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff) if (ret) return ret; - if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) + if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) return 0; if (genpd->dev_ops.stop && genpd->dev_ops.start && @@ -1196,7 +1211,7 @@ static int genpd_resume_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) + if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) return pm_generic_resume_noirq(dev); genpd_lock(genpd); @@ -1363,41 +1378,60 @@ static void genpd_complete(struct device *dev) genpd_unlock(genpd); } -/** - * genpd_syscore_switch - Switch power during system core suspend or resume. - * @dev: Device that normally is marked as "always on" to switch power for. 
- * - * This routine may only be called during the system core (syscore) suspend or - * resume phase for devices whose "always on" flags are set. - */ -static void genpd_syscore_switch(struct device *dev, bool suspend) +static void genpd_switch_state(struct device *dev, bool suspend) { struct generic_pm_domain *genpd; + bool use_lock; genpd = dev_to_genpd_safe(dev); if (!genpd) return; + use_lock = genpd_is_irq_safe(genpd); + + if (use_lock) + genpd_lock(genpd); + if (suspend) { genpd->suspended_count++; - genpd_sync_power_off(genpd, false, 0); + genpd_sync_power_off(genpd, use_lock, 0); } else { - genpd_sync_power_on(genpd, false, 0); + genpd_sync_power_on(genpd, use_lock, 0); genpd->suspended_count--; } + + if (use_lock) + genpd_unlock(genpd); } -void pm_genpd_syscore_poweroff(struct device *dev) +/** + * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev + * @dev: The device that is attached to the genpd, that can be suspended. + * + * This routine should typically be called for a device that needs to be + * suspended during the syscore suspend phase. It may also be called during + * suspend-to-idle to suspend a corresponding CPU device that is attached to a + * genpd. + */ +void dev_pm_genpd_suspend(struct device *dev) { - genpd_syscore_switch(dev, true); + genpd_switch_state(dev, true); } -EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff); +EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend); -void pm_genpd_syscore_poweron(struct device *dev) +/** + * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev + * @dev: The device that is attached to the genpd, which needs to be resumed. + * + * This routine should typically be called for a device that needs to be resumed + * during the syscore resume phase. It may also be called during suspend-to-idle + * to resume a corresponding CPU device that is attached to a genpd. + */ +void dev_pm_genpd_resume(struct device *dev) { - genpd_syscore_switch(dev, false); + genpd_switch_state(dev, false); } -EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); +EXPORT_SYMBOL_GPL(dev_pm_genpd_resume); #else /* !CONFIG_PM_SLEEP */ @@ -1954,6 +1988,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); + genpd_debug_add(genpd); mutex_unlock(&gpd_list_lock); return 0; @@ -1987,6 +2022,7 @@ static int genpd_remove(struct generic_pm_domain *genpd) kfree(link); } + genpd_debug_remove(genpd); list_del(&genpd->gpd_list_node); genpd_unlock(genpd); cancel_work_sync(&genpd->power_off_work); @@ -2249,7 +2285,7 @@ int of_genpd_add_provider_onecell(struct device_node *np, * Save table for faster processing while setting * performance state. */ - genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i); + genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); WARN_ON(IS_ERR(genpd->opp_table)); } @@ -2893,14 +2929,6 @@ core_initcall(genpd_bus_init); /*** debugfs support ***/ #ifdef CONFIG_DEBUG_FS -#include <linux/pm.h> -#include <linux/device.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/kobject.h> -static struct dentry *genpd_debugfs_dir; - /* * TODO: This function is a slightly modified version of rtpm_status_show * from sysfs.c, so generalize it. 
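With the rename from pm_genpd_syscore_poweroff/poweron to dev_pm_genpd_suspend/resume (and the lock now taken for IRQ-safe domains), a syscore user looks roughly like the sketch below. The device pointer and names are hypothetical and not part of the patch; demo_dev is assumed to have been set to a genpd-attached device at probe time.

#include <linux/init.h>
#include <linux/pm_domain.h>
#include <linux/syscore_ops.h>

static struct device *demo_dev;	/* hypothetical always-on, genpd-attached device */

static int demo_syscore_suspend(void)
{
	dev_pm_genpd_suspend(demo_dev);
	return 0;
}

static void demo_syscore_resume(void)
{
	dev_pm_genpd_resume(demo_dev);
}

static struct syscore_ops demo_syscore_ops = {
	.suspend = demo_syscore_suspend,
	.resume  = demo_syscore_resume,
};

static int __init demo_syscore_init(void)
{
	register_syscore_ops(&demo_syscore_ops);
	return 0;
}
device_initcall(demo_syscore_init);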
@@ -3177,9 +3205,34 @@ DEFINE_SHOW_ATTRIBUTE(total_idle_time); DEFINE_SHOW_ATTRIBUTE(devices); DEFINE_SHOW_ATTRIBUTE(perf_state); -static int __init genpd_debug_init(void) +static void genpd_debug_add(struct generic_pm_domain *genpd) { struct dentry *d; + + if (!genpd_debugfs_dir) + return; + + d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); + + debugfs_create_file("current_state", 0444, + d, genpd, &status_fops); + debugfs_create_file("sub_domains", 0444, + d, genpd, &sub_domains_fops); + debugfs_create_file("idle_states", 0444, + d, genpd, &idle_states_fops); + debugfs_create_file("active_time", 0444, + d, genpd, &active_time_fops); + debugfs_create_file("total_idle_time", 0444, + d, genpd, &total_idle_time_fops); + debugfs_create_file("devices", 0444, + d, genpd, &devices_fops); + if (genpd->set_performance_state) + debugfs_create_file("perf_state", 0444, + d, genpd, &perf_state_fops); +} + +static int __init genpd_debug_init(void) +{ struct generic_pm_domain *genpd; genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); @@ -3187,25 +3240,8 @@ static int __init genpd_debug_init(void) debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, NULL, &summary_fops); - list_for_each_entry(genpd, &gpd_list, gpd_list_node) { - d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); - - debugfs_create_file("current_state", 0444, - d, genpd, &status_fops); - debugfs_create_file("sub_domains", 0444, - d, genpd, &sub_domains_fops); - debugfs_create_file("idle_states", 0444, - d, genpd, &idle_states_fops); - debugfs_create_file("active_time", 0444, - d, genpd, &active_time_fops); - debugfs_create_file("total_idle_time", 0444, - d, genpd, &total_idle_time_fops); - debugfs_create_file("devices", 0444, - d, genpd, &devices_fops); - if (genpd->set_performance_state) - debugfs_create_file("perf_state", 0444, - d, genpd, &perf_state_fops); - } + list_for_each_entry(genpd, &gpd_list, gpd_list_node) + genpd_debug_add(genpd); return 0; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c7ac49042cee..46793276598d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -441,9 +441,9 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) { - dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), + dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event), ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? - ", may wakeup" : ""); + ", may wakeup" : "", dev->power.driver_flags); } static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, @@ -1359,7 +1359,7 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev) spin_lock_irq(&parent->power.lock); - if (dev->power.wakeup_path && !parent->power.ignore_children) + if (device_wakeup_path(dev) && !parent->power.ignore_children) parent->power.wakeup_path = true; spin_unlock_irq(&parent->power.lock); @@ -1627,7 +1627,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto Complete; /* Avoid direct_complete to let wakeup_path propagate. 
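The dev->power.wakeup_path reads above now go through the device_wakeup_path() helper. A short sketch, assuming the accessor from linux/pm_wakeup.h and a hypothetical driver, of the same check made from a noirq suspend callback:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

static int demo_suspend_noirq(struct device *dev)
{
	/* Keep resources powered if a wakeup path runs through this device. */
	if (device_wakeup_path(dev))
		return 0;

	/* ...otherwise gate clocks / drop the power rail here... */
	return 0;
}

/* Wired up via .driver.pm = &demo_pm_ops in a real driver. */
static const struct dev_pm_ops demo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_suspend_noirq, NULL)
};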
*/ - if (device_may_wakeup(dev) || dev->power.wakeup_path) + if (device_may_wakeup(dev) || device_wakeup_path(dev)) dev->power.direct_complete = false; if (dev->power.direct_complete) { diff --git a/drivers/base/property.c b/drivers/base/property.c index 4c43d30145c6..35b95c6ac0c6 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -615,6 +615,31 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode) EXPORT_SYMBOL_GPL(fwnode_get_next_parent); /** + * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode + * @fwnode: firmware node + * + * Given a firmware node (@fwnode), this function finds its closest ancestor + * firmware node that has a corresponding struct device and returns that struct + * device. + * + * The caller of this function is expected to call put_device() on the returned + * device when they are done. + */ +struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode) +{ + struct device *dev = NULL; + + fwnode_handle_get(fwnode); + do { + fwnode = fwnode_get_next_parent(fwnode); + if (fwnode) + dev = get_dev_from_fwnode(fwnode); + } while (fwnode && !dev); + fwnode_handle_put(fwnode); + return dev; +} + +/** * fwnode_count_parents - Return the number of parents a node has * @fwnode: The node the parents of which are to be counted * @@ -661,6 +686,33 @@ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode, EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); /** + * fwnode_is_ancestor_of - Test if @test_ancestor is ancestor of @test_child + * @test_ancestor: Firmware which is tested for being an ancestor + * @test_child: Firmware which is tested for being the child + * + * A node is considered an ancestor of itself too. + * + * Returns true if @test_ancestor is an ancestor of @test_child. + * Otherwise, returns false. + */ +bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor, + struct fwnode_handle *test_child) +{ + if (!test_ancestor) + return false; + + fwnode_handle_get(test_child); + while (test_child) { + if (test_child == test_ancestor) { + fwnode_handle_put(test_child); + return true; + } + test_child = fwnode_get_next_parent(test_child); + } + return false; +} + +/** * fwnode_get_next_child_node - Return the next child node handle for a node * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index bcb90d8c3960..50b1e2d06a25 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -4,7 +4,7 @@ # subsystems should select the appropriate symbols. 
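The two new fwnode helpers above are used by the driver core itself and are not exported in this patch, so the sketch below assumes built-in code and hypothetical names; it combines them to ask whether the closest ancestor device of a node sits underneath a given root node.

#include <linux/device.h>
#include <linux/property.h>

static bool demo_parent_dev_under(struct fwnode_handle *fwnode,
				  struct fwnode_handle *root)
{
	struct device *parent = fwnode_get_next_parent_dev(fwnode);
	bool ret = false;

	if (parent) {
		/* A node counts as an ancestor of itself. */
		ret = fwnode_is_ancestor_of(root, dev_fwnode(parent));
		/* fwnode_get_next_parent_dev() returned a reference. */
		put_device(parent);
	}
	return ret;
}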
config REGMAP - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM) + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM) select IRQ_DOMAIN if REGMAP_IRQ bool @@ -46,6 +46,10 @@ config REGMAP_SOUNDWIRE tristate depends on SOUNDWIRE +config REGMAP_SOUNDWIRE_MBQ + tristate + depends on SOUNDWIRE + config REGMAP_SCCB tristate depends on I2C diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index ac1b69ee4051..33f63adb5b3d 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o obj-$(CONFIG_REGMAP_W1) += regmap-w1.o obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o +obj-$(CONFIG_REGMAP_SOUNDWIRE_MBQ) += regmap-sdw-mbq.o obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o obj-$(CONFIG_REGMAP_SPI_AVMM) += regmap-spi-avmm.o diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 8dfac7f3ed7a..ff2ee87987c7 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map) devname = dev_name(map->dev); if (name) { - map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", + if (!map->debugfs_name) { + map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", devname, name); + if (!map->debugfs_name) + return; + } name = map->debugfs_name; } else { name = devname; @@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map) if (!strcmp(name, "dummy")) { kfree(map->debugfs_name); - map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", dummy_index); + if (!map->debugfs_name) + return; name = map->debugfs_name; dummy_index++; } diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index ad5c2de395d1..19db764ffa4a 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -803,13 +803,12 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, } if (irq_base) - d->domain = irq_domain_add_legacy(to_of_node(fwnode), - chip->num_irqs, irq_base, - 0, ®map_domain_ops, d); + d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs, + irq_base, 0, + ®map_domain_ops, d); else - d->domain = irq_domain_add_linear(to_of_node(fwnode), - chip->num_irqs, - ®map_domain_ops, d); + d->domain = irq_domain_create_linear(fwnode, chip->num_irqs, + ®map_domain_ops, d); if (!d->domain) { dev_err(map->dev, "Failed to create IRQ domain\n"); ret = -ENOMEM; diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index af967d8f975e..f9cd51afb9d2 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -16,6 +16,7 @@ struct regmap_mmio_context { void __iomem *regs; unsigned val_bytes; + bool relaxed_mmio; bool attached_clk; struct clk *clk; @@ -75,6 +76,13 @@ static void regmap_mmio_write8(struct regmap_mmio_context *ctx, writeb(val, ctx->regs + reg); } +static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writeb_relaxed(val, ctx->regs + reg); +} + static void regmap_mmio_write16le(struct regmap_mmio_context *ctx, unsigned int reg, unsigned int val) @@ -82,6 +90,13 @@ static void 
regmap_mmio_write16le(struct regmap_mmio_context *ctx, writew(val, ctx->regs + reg); } +static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writew_relaxed(val, ctx->regs + reg); +} + static void regmap_mmio_write16be(struct regmap_mmio_context *ctx, unsigned int reg, unsigned int val) @@ -96,6 +111,13 @@ static void regmap_mmio_write32le(struct regmap_mmio_context *ctx, writel(val, ctx->regs + reg); } +static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writel_relaxed(val, ctx->regs + reg); +} + static void regmap_mmio_write32be(struct regmap_mmio_context *ctx, unsigned int reg, unsigned int val) @@ -110,6 +132,13 @@ static void regmap_mmio_write64le(struct regmap_mmio_context *ctx, { writeq(val, ctx->regs + reg); } + +static void regmap_mmio_write64le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writeq_relaxed(val, ctx->regs + reg); +} #endif static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val) @@ -137,12 +166,24 @@ static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx, return readb(ctx->regs + reg); } +static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readb_relaxed(ctx->regs + reg); +} + static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx, unsigned int reg) { return readw(ctx->regs + reg); } +static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readw_relaxed(ctx->regs + reg); +} + static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx, unsigned int reg) { @@ -155,6 +196,12 @@ static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx, return readl(ctx->regs + reg); } +static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readl_relaxed(ctx->regs + reg); +} + static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx, unsigned int reg) { @@ -167,6 +214,12 @@ static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx, { return readq(ctx->regs + reg); } + +static unsigned int regmap_mmio_read64le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readq_relaxed(ctx->regs + reg); +} #endif static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val) @@ -237,6 +290,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, ctx->regs = regs; ctx->val_bytes = config->val_bits / 8; + ctx->relaxed_mmio = config->use_relaxed_mmio; ctx->clk = ERR_PTR(-ENODEV); switch (regmap_get_val_endian(dev, ®map_mmio, config)) { @@ -247,21 +301,41 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, #endif switch (config->val_bits) { case 8: - ctx->reg_read = regmap_mmio_read8; - ctx->reg_write = regmap_mmio_write8; + if (ctx->relaxed_mmio) { + ctx->reg_read = regmap_mmio_read8_relaxed; + ctx->reg_write = regmap_mmio_write8_relaxed; + } else { + ctx->reg_read = regmap_mmio_read8; + ctx->reg_write = regmap_mmio_write8; + } break; case 16: - ctx->reg_read = regmap_mmio_read16le; - ctx->reg_write = regmap_mmio_write16le; + if (ctx->relaxed_mmio) { + ctx->reg_read = regmap_mmio_read16le_relaxed; + ctx->reg_write = regmap_mmio_write16le_relaxed; + } else { + ctx->reg_read = regmap_mmio_read16le; + ctx->reg_write = regmap_mmio_write16le; + } break; 
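The new use_relaxed_mmio flag selects the *_relaxed accessors above, skipping the memory barriers implied by readl()/writel(). A sketch of how a driver might opt in (names hypothetical, and only appropriate when the device needs no ordering against DMA or other memory accesses):

#include <linux/regmap.h>

static const struct regmap_config demo_mmio_config = {
	.reg_bits	  = 32,
	.reg_stride	  = 4,
	.val_bits	  = 32,
	.use_relaxed_mmio = true,	/* readl_relaxed()/writel_relaxed() underneath */
};

/*
 * In probe, with 'base' obtained from devm_platform_ioremap_resource():
 *
 *	map = devm_regmap_init_mmio(&pdev->dev, base, &demo_mmio_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */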
case 32: - ctx->reg_read = regmap_mmio_read32le; - ctx->reg_write = regmap_mmio_write32le; + if (ctx->relaxed_mmio) { + ctx->reg_read = regmap_mmio_read32le_relaxed; + ctx->reg_write = regmap_mmio_write32le_relaxed; + } else { + ctx->reg_read = regmap_mmio_read32le; + ctx->reg_write = regmap_mmio_write32le; + } break; #ifdef CONFIG_64BIT case 64: - ctx->reg_read = regmap_mmio_read64le; - ctx->reg_write = regmap_mmio_write64le; + if (ctx->relaxed_mmio) { + ctx->reg_read = regmap_mmio_read64le_relaxed; + ctx->reg_write = regmap_mmio_write64le_relaxed; + } else { + ctx->reg_read = regmap_mmio_read64le; + ctx->reg_write = regmap_mmio_write64le; + } break; #endif default: diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c new file mode 100644 index 000000000000..8ce30650b97c --- /dev/null +++ b/drivers/base/regmap/regmap-sdw-mbq.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright(c) 2020 Intel Corporation. + +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/soundwire/sdw.h> +#include <linux/soundwire/sdw_registers.h> +#include "internal.h" + +static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val) +{ + struct device *dev = context; + struct sdw_slave *slave = dev_to_sdw_dev(dev); + int ret; + + ret = sdw_write(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff); + if (ret < 0) + return ret; + + return sdw_write(slave, reg, val & 0xff); +} + +static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val) +{ + struct device *dev = context; + struct sdw_slave *slave = dev_to_sdw_dev(dev); + int read0; + int read1; + + read0 = sdw_read(slave, reg); + if (read0 < 0) + return read0; + + read1 = sdw_read(slave, SDW_SDCA_MBQ_CTL(reg)); + if (read1 < 0) + return read1; + + *val = (read1 << 8) | read0; + + return 0; +} + +static struct regmap_bus regmap_sdw_mbq = { + .reg_read = regmap_sdw_mbq_read, + .reg_write = regmap_sdw_mbq_write, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static int regmap_sdw_mbq_config_check(const struct regmap_config *config) +{ + /* MBQ-based controls are only 16-bits for now */ + if (config->val_bits != 16) + return -ENOTSUPP; + + /* Registers are 32 bits wide */ + if (config->reg_bits != 32) + return -ENOTSUPP; + + if (config->pad_bits != 0) + return -ENOTSUPP; + + return 0; +} + +struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + int ret; + + ret = regmap_sdw_mbq_config_check(config); + if (ret) + return ERR_PTR(ret); + + return __regmap_init(&sdw->dev, ®map_sdw_mbq, + &sdw->dev, config, lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq); + +struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + int ret; + + ret = regmap_sdw_mbq_config_check(config); + if (ret) + return ERR_PTR(ret); + + return __devm_regmap_init(&sdw->dev, ®map_sdw_mbq, + &sdw->dev, config, lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq); + +MODULE_DESCRIPTION("Regmap SoundWire MBQ Module"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c index c92d614b4943..c83be26434e7 100644 --- a/drivers/base/regmap/regmap-sdw.c +++ 
b/drivers/base/regmap/regmap-sdw.c @@ -2,7 +2,9 @@ // Copyright(c) 2015-17 Intel Corporation. #include <linux/device.h> +#include <linux/errno.h> #include <linux/module.h> +#include <linux/regmap.h> #include <linux/soundwire/sdw.h> #include "internal.h" diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 5db536ccfcd6..297e95be25b3 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1924,12 +1924,15 @@ int _regmap_write(struct regmap *map, unsigned int reg, } } - if (regmap_should_log(map)) - dev_info(map->dev, "%x <= %x\n", reg, val); + ret = map->reg_write(context, reg, val); + if (ret == 0) { + if (regmap_should_log(map)) + dev_info(map->dev, "%x <= %x\n", reg, val); - trace_regmap_reg_write(map, reg, val); + trace_regmap_reg_write(map, reg, val); + } - return map->reg_write(context, reg, val); + return ret; } /** diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h index d4066fa079ab..9abee14df9ee 100644 --- a/drivers/base/regmap/trace.h +++ b/drivers/base/regmap/trace.h @@ -126,7 +126,6 @@ TRACE_EVENT(regcache_sync, __string( name, regmap_name(map) ) __string( status, status ) __string( type, type ) - __field( int, type ) ), TP_fast_assign( diff --git a/drivers/base/soc.c b/drivers/base/soc.c index d34609bb7386..0af5363a582c 100644 --- a/drivers/base/soc.c +++ b/drivers/base/soc.c @@ -168,7 +168,7 @@ out1: } EXPORT_SYMBOL_GPL(soc_device_register); -/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */ +/* Ensure soc_dev->attr is freed after calling soc_device_unregister. */ void soc_device_unregister(struct soc_device *soc_dev) { device_unregister(&soc_dev->dev); diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 010828fc785b..4a4b2008fbc2 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -653,7 +653,7 @@ swnode_register(const struct software_node *node, struct swnode *parent, swnode->parent = parent; swnode->allocated = allocated; swnode->kobj.kset = swnode_kset; - swnode->fwnode.ops = &software_node_ops; + fwnode_init(&swnode->fwnode, &software_node_ops); ida_init(&swnode->child_ids); INIT_LIST_HEAD(&swnode->entry); |
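The regmap-sdw-mbq helper added above is meant to be picked up by SDCA codec drivers. A minimal sketch of creating such a regmap from a SoundWire driver's probe path, with a hypothetical config; per regmap_sdw_mbq_config_check() it must use 32-bit registers, 16-bit values and no padding bits.

#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>

static const struct regmap_config demo_mbq_config = {
	.reg_bits = 32,		/* 32-bit SDCA control addresses */
	.val_bits = 16,		/* value split across the MBQ and base registers */
};

static int demo_codec_init(struct sdw_slave *slave)
{
	struct regmap *mbq_regmap;

	mbq_regmap = devm_regmap_init_sdw_mbq(slave, &demo_mbq_config);
	if (IS_ERR(mbq_regmap))
		return PTR_ERR(mbq_regmap);

	/* 16-bit accesses now issue the paired sdw_read()/sdw_write() calls. */
	return 0;
}

A codec would typically keep this MBQ regmap alongside its ordinary SoundWire regmap and route only the deep (e.g. volume) controls through it.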