Diffstat (limited to 'drivers/base')
-rw-r--r--   drivers/base/core.c                  |  71
-rw-r--r--   drivers/base/firmware_loader/main.c  |   2
-rw-r--r--   drivers/base/memory.c                | 101
-rw-r--r--   drivers/base/power/runtime.c         |  10
4 files changed, 132 insertions, 52 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4a8bf8cda52b..54ba506e5a89 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -150,7 +150,7 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
 	fwnode_links_purge_consumers(fwnode);
 }
 
-static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
 {
 	struct fwnode_handle *child;
 
@@ -164,6 +164,7 @@ static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
 	fwnode_for_each_available_child_node(fwnode, child)
 		fw_devlink_purge_absent_suppliers(child);
 }
+EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
 
 #ifdef CONFIG_SRCU
 static DEFINE_MUTEX(device_links_lock);
@@ -193,6 +194,17 @@ int device_links_read_lock_held(void)
 {
 	return srcu_read_lock_held(&device_links_srcu);
 }
+
+static void device_link_synchronize_removal(void)
+{
+	synchronize_srcu(&device_links_srcu);
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+	list_del_rcu(&link->s_node);
+	list_del_rcu(&link->c_node);
+}
 #else /* !CONFIG_SRCU */
 static DECLARE_RWSEM(device_links_lock);
 
@@ -223,6 +235,16 @@ int device_links_read_lock_held(void)
 	return lockdep_is_held(&device_links_lock);
 }
 #endif
+
+static inline void device_link_synchronize_removal(void)
+{
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+	list_del(&link->s_node);
+	list_del(&link->c_node);
+}
 #endif /* !CONFIG_SRCU */
 
 static bool device_is_ancestor(struct device *dev, struct device *target)
@@ -444,8 +466,13 @@ static struct attribute *devlink_attrs[] = {
 };
 ATTRIBUTE_GROUPS(devlink);
 
-static void device_link_free(struct device_link *link)
+static void device_link_release_fn(struct work_struct *work)
 {
+	struct device_link *link = container_of(work, struct device_link, rm_work);
+
+	/* Ensure that all references to the link object have been dropped. */
+	device_link_synchronize_removal();
+
 	while (refcount_dec_not_one(&link->rpm_active))
 		pm_runtime_put(link->supplier);
 
@@ -454,24 +481,19 @@ static void device_link_free(struct device_link *link)
 	kfree(link);
 }
 
-#ifdef CONFIG_SRCU
-static void __device_link_free_srcu(struct rcu_head *rhead)
-{
-	device_link_free(container_of(rhead, struct device_link, rcu_head));
-}
-
 static void devlink_dev_release(struct device *dev)
 {
 	struct device_link *link = to_devlink(dev);
 
-	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
-}
-#else
-static void devlink_dev_release(struct device *dev)
-{
-	device_link_free(to_devlink(dev));
+	INIT_WORK(&link->rm_work, device_link_release_fn);
+	/*
+	 * It may take a while to complete this work because of the SRCU
+	 * synchronization in device_link_release_fn() and if the consumer or
+	 * supplier devices get deleted when it runs, so put it into the "long"
+	 * workqueue.
+	 */
+	queue_work(system_long_wq, &link->rm_work);
 }
-#endif
 
 static struct class devlink_class = {
 	.name = "devlink",
@@ -845,7 +867,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(device_link_add);
 
-#ifdef CONFIG_SRCU
 static void __device_link_del(struct kref *kref)
 {
 	struct device_link *link = container_of(kref, struct device_link, kref);
@@ -855,25 +876,9 @@ static void __device_link_del(struct kref *kref)
 
 	pm_runtime_drop_link(link);
 
-	list_del_rcu(&link->s_node);
-	list_del_rcu(&link->c_node);
-	device_unregister(&link->link_dev);
-}
-#else /* !CONFIG_SRCU */
-static void __device_link_del(struct kref *kref)
-{
-	struct device_link *link = container_of(kref, struct device_link, kref);
-
-	dev_info(link->consumer, "Dropping the link to %s\n",
-		 dev_name(link->supplier));
-
-	pm_runtime_drop_link(link);
-
-	list_del(&link->s_node);
-	list_del(&link->c_node);
+	device_link_remove_from_lists(link);
 	device_unregister(&link->link_dev);
 }
-#endif /* !CONFIG_SRCU */
 
 static void device_link_put_kref(struct device_link *link)
 {
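
For context, the sketch below shows how a driver typically exercises the device-links API touched by the hunks above. The foo_bind() function and both device pointers are hypothetical; device_link_add(), device_link_del() and the DL_FLAG_* flags are the existing driver-core interfaces. With this change, the final release of the devlink class device runs from device_link_release_fn() on system_long_wq after an SRCU grace period, in both the SRCU and non-SRCU configurations.

#include <linux/device.h>

/* Hypothetical consumer/supplier pairing; only the device_link_*() calls
 * and DL_FLAG_* flags are real driver-core API. */
static int foo_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -ENODEV;

	/* ... the consumer uses the supplier here ... */

	/*
	 * Dropping a stateless link unregisters the devlink class device;
	 * its release is now deferred to device_link_release_fn() on
	 * system_long_wq instead of a call_srcu() callback.
	 */
	device_link_del(link);
	return 0;
}
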
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 78355095e00d..4fdb8219cd08 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -15,6 +15,7 @@
 #include <linux/kernel_read_file.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/initrd.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
 #include <linux/interrupt.h>
@@ -504,6 +505,7 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
 	if (!path)
 		return -ENOMEM;
 
+	wait_for_initramfs();
 	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
 		size_t file_size = 0;
 		size_t *file_size_ptr = NULL;
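
As a rough usage sketch of the firmware path changed above: a built-in driver that requests firmware early now blocks in fw_get_filesystem_firmware() until the initramfs has been unpacked, instead of missing a blob that only exists in the not-yet-populated rootfs. The foo_load_fw() function and the firmware name are placeholders; request_firmware()/release_firmware() are the existing API.

#include <linux/firmware.h>

/* Hypothetical early caller; "foo/firmware.bin" is a made-up blob name. */
static int foo_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* With wait_for_initramfs() in place, this no longer races with
	 * initramfs unpacking when called from a built-in driver's probe. */
	ret = request_firmware(&fw, "foo/firmware.bin", dev);
	if (ret)
		return ret;

	/* ... push fw->data (fw->size bytes) to the device ... */

	release_firmware(fw);
	return 0;
}
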
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f35298425575..b31b3af5c490 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -169,30 +169,98 @@ int memory_notify(unsigned long val, void *v)
 	return blocking_notifier_call_chain(&memory_chain, val, v);
 }
 
+static int memory_block_online(struct memory_block *mem)
+{
+	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+	struct zone *zone;
+	int ret;
+
+	zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);
+
+	/*
+	 * Although vmemmap pages have a different lifecycle than the pages
+	 * they describe (they remain until the memory is unplugged), doing
+	 * their initialization and accounting at memory onlining/offlining
+	 * stage helps to keep accounting easier to follow - e.g vmemmaps
+	 * belong to the same zone as the memory they backed.
+	 */
+	if (nr_vmemmap_pages) {
+		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+		if (ret)
+			return ret;
+	}
+
+	ret = online_pages(start_pfn + nr_vmemmap_pages,
+			   nr_pages - nr_vmemmap_pages, zone);
+	if (ret) {
+		if (nr_vmemmap_pages)
+			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+		return ret;
+	}
+
+	/*
+	 * Account once onlining succeeded. If the zone was unpopulated, it is
+	 * now already properly populated.
+	 */
+	if (nr_vmemmap_pages)
+		adjust_present_page_count(zone, nr_vmemmap_pages);
+
+	return ret;
+}
+
+static int memory_block_offline(struct memory_block *mem)
+{
+	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+	struct zone *zone;
+	int ret;
+
+	zone = page_zone(pfn_to_page(start_pfn));
+
+	/*
+	 * Unaccount before offlining, such that unpopulated zone and kthreads
+	 * can properly be torn down in offline_pages().
+	 */
+	if (nr_vmemmap_pages)
+		adjust_present_page_count(zone, -nr_vmemmap_pages);
+
+	ret = offline_pages(start_pfn + nr_vmemmap_pages,
+			    nr_pages - nr_vmemmap_pages);
+	if (ret) {
+		/* offline_pages() failed. Account back. */
+		if (nr_vmemmap_pages)
+			adjust_present_page_count(zone, nr_vmemmap_pages);
+		return ret;
+	}
+
+	if (nr_vmemmap_pages)
+		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+
+	return ret;
+}
+
 /*
  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
  * OK to have direct references to sparsemem variables in here.
  */
 static int
-memory_block_action(unsigned long start_section_nr, unsigned long action,
-		    int online_type, int nid)
+memory_block_action(struct memory_block *mem, unsigned long action)
 {
-	unsigned long start_pfn;
-	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	int ret;
 
-	start_pfn = section_nr_to_pfn(start_section_nr);
-
 	switch (action) {
 	case MEM_ONLINE:
-		ret = online_pages(start_pfn, nr_pages, online_type, nid);
+		ret = memory_block_online(mem);
 		break;
 	case MEM_OFFLINE:
-		ret = offline_pages(start_pfn, nr_pages);
+		ret = memory_block_offline(mem);
 		break;
 	default:
 		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
-		     "%ld\n", __func__, start_section_nr, action, action);
+		     "%ld\n", __func__, mem->start_section_nr, action, action);
 		ret = -EINVAL;
 	}
 
@@ -210,9 +278,7 @@ static int memory_block_change_state(struct memory_block *mem,
 	if (to_state == MEM_OFFLINE)
 		mem->state = MEM_GOING_OFFLINE;
 
-	ret = memory_block_action(mem->start_section_nr, to_state,
-				  mem->online_type, mem->nid);
-
+	ret = memory_block_action(mem, to_state);
 	mem->state = ret ? from_state_req : to_state;
 
 	return ret;
@@ -567,7 +633,8 @@ int register_memory(struct memory_block *memory)
 	return ret;
 }
 
-static int init_memory_block(unsigned long block_id, unsigned long state)
+static int init_memory_block(unsigned long block_id, unsigned long state,
+			     unsigned long nr_vmemmap_pages)
 {
 	struct memory_block *mem;
 	int ret = 0;
@@ -584,6 +651,7 @@ static int init_memory_block(unsigned long block_id, unsigned long state)
 	mem->start_section_nr = block_id * sections_per_block;
 	mem->state = state;
 	mem->nid = NUMA_NO_NODE;
+	mem->nr_vmemmap_pages = nr_vmemmap_pages;
 
 	ret = register_memory(mem);
 
@@ -603,7 +671,7 @@ static int add_memory_block(unsigned long base_section_nr)
 	if (section_count == 0)
 		return 0;
 	return init_memory_block(memory_block_id(base_section_nr),
-				 MEM_ONLINE);
+				 MEM_ONLINE, 0);
 }
 
 static void unregister_memory(struct memory_block *memory)
@@ -625,7 +693,8 @@ static void unregister_memory(struct memory_block *memory)
  *
  * Called under device_hotplug_lock.
  */
-int create_memory_block_devices(unsigned long start, unsigned long size)
+int create_memory_block_devices(unsigned long start, unsigned long size,
+				unsigned long vmemmap_pages)
 {
 	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
 	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
@@ -638,7 +707,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
 		return -EINVAL;
 
 	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
-		ret = init_memory_block(block_id, MEM_OFFLINE);
+		ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages);
 		if (ret)
 			break;
 	}
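
The vmemmap/online split in memory_block_online() above works out as in the following sketch; the numbers assume a 128 MiB memory block, 4 KiB pages and a 64-byte struct page (typical x86_64 values) and are illustrative only.

#include <linux/sizes.h>

/* Illustrative arithmetic only; real values come from mem->nr_vmemmap_pages. */
static const unsigned long nr_pages         = SZ_128M / SZ_4K;     /* 32768 */
static const unsigned long nr_vmemmap_pages = 32768 * 64 / SZ_4K;  /*   512 */

/*
 * memory_block_online() then calls online_pages() for
 *   [start_pfn + 512, start_pfn + 32768), i.e. 32256 pages,
 * and afterwards adds the 512 vmemmap pages back to the zone via
 * adjust_present_page_count(), so the whole block ends up accounted
 * to the zone it was onlined into.
 */
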
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1fc1a992f90c..b570848d23e0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
+	dev->power.needs_force_resume = 0;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
@@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
 	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
 	 * function will be called again for it in the meantime.
 	 */
-	if (pm_runtime_need_not_resume(dev))
+	if (pm_runtime_need_not_resume(dev)) {
 		pm_runtime_set_suspended(dev);
-	else
+	} else {
 		__update_runtime_status(dev, RPM_SUSPENDED);
+		dev->power.needs_force_resume = 1;
+	}
 
 	return 0;
 
@@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
 	int (*callback)(struct device *);
 	int ret = 0;
 
-	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
 		goto out;
 
 	/*
@@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
 	pm_runtime_mark_last_busy(dev);
 
 out:
+	dev->power.needs_force_resume = 0;
 	pm_runtime_enable(dev);
 	return ret;
 }
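
For reference, the pm_runtime_force_suspend()/pm_runtime_force_resume() pair changed above is normally wired up as a driver's system sleep callbacks, as in this sketch; the foo_* callbacks are hypothetical, while the macros and helpers are the existing runtime-PM API. The new needs_force_resume flag makes pm_runtime_force_resume() act only on devices that pm_runtime_force_suspend() actually powered down, rather than re-evaluating pm_runtime_need_not_resume(), whose result can change between suspend and resume.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* ... put the hypothetical device into a low-power state ... */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ... bring the hypothetical device back to full power ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
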