Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/firmware_loader/fallback_table.c |  13
-rw-r--r-- | drivers/base/memory.c                         | 219
-rw-r--r-- | drivers/base/node.c                           |  35
3 files changed, 154 insertions, 113 deletions
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 776dd69cf5be..ba9d30b28edc 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -16,9 +16,6 @@
  * firmware fallback configuration table
  */
 
-static unsigned int zero;
-static unsigned int one = 1;
-
 struct firmware_fallback_config fw_fallback_config = {
         .force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
         .loading_timeout = 60,
@@ -26,6 +23,7 @@ struct firmware_fallback_config fw_fallback_config = {
 };
 EXPORT_SYMBOL_GPL(fw_fallback_config);
 
+#ifdef CONFIG_SYSCTL
 struct ctl_table firmware_config_table[] = {
         {
                 .procname       = "force_sysfs_fallback",
@@ -33,8 +31,8 @@ struct ctl_table firmware_config_table[] = {
                 .maxlen         = sizeof(unsigned int),
                 .mode           = 0644,
                 .proc_handler   = proc_douintvec_minmax,
-                .extra1         = &zero,
-                .extra2         = &one,
+                .extra1         = SYSCTL_ZERO,
+                .extra2         = SYSCTL_ONE,
         },
         {
                 .procname       = "ignore_sysfs_fallback",
@@ -42,9 +40,10 @@ struct ctl_table firmware_config_table[] = {
                 .maxlen         = sizeof(unsigned int),
                 .mode           = 0644,
                 .proc_handler   = proc_douintvec_minmax,
-                .extra1         = &zero,
-                .extra2         = &one,
+                .extra1         = SYSCTL_ZERO,
+                .extra2         = SYSCTL_ONE,
         },
         { }
 };
 EXPORT_SYMBOL_GPL(firmware_config_table);
+#endif
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f180427e48f4..20c39d1bcef8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -34,11 +34,21 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
 
 static int sections_per_block;
 
-static inline int base_memory_block_id(int section_nr)
+static inline unsigned long base_memory_block_id(unsigned long section_nr)
 {
         return section_nr / sections_per_block;
 }
 
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+        return base_memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+        return pfn_to_block_id(PFN_DOWN(phys));
+}
+
 static int memory_subsys_online(struct device *dev);
 static int memory_subsys_offline(struct device *dev);
 
@@ -126,9 +136,9 @@ static ssize_t phys_index_show(struct device *dev,
 static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
                               char *buf)
 {
-        unsigned long i, pfn;
-        int ret = 1;
         struct memory_block *mem = to_memory_block(dev);
+        unsigned long pfn;
+        int ret = 1, i;
 
         if (mem->state != MEM_ONLINE)
                 goto out;
@@ -578,23 +588,13 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
         return 0;
 }
 
-/*
- * A reference for the returned object is held and the reference for the
- * hinted object is released.
- */
-struct memory_block *find_memory_block_hinted(struct mem_section *section,
-                                              struct memory_block *hint)
+/* A reference for the returned memory block device is acquired. */
+static struct memory_block *find_memory_block_by_id(unsigned long block_id)
 {
-        int block_id = base_memory_block_id(__section_nr(section));
-        struct device *hintdev = hint ? &hint->dev : NULL;
         struct device *dev;
 
-        dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
-        if (hint)
-                put_device(&hint->dev);
-        if (!dev)
-                return NULL;
-        return to_memory_block(dev);
+        dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
+        return dev ? to_memory_block(dev) : NULL;
 }
 
 /*
@@ -607,7 +607,9 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
  */
 struct memory_block *find_memory_block(struct mem_section *section)
 {
-        return find_memory_block_hinted(section, NULL);
+        unsigned long block_id = base_memory_block_id(__section_nr(section));
+
+        return find_memory_block_by_id(block_id);
 }
 
 static struct attribute *memory_memblk_attrs[] = {
@@ -652,20 +654,22 @@ int register_memory(struct memory_block *memory)
 }
 
 static int init_memory_block(struct memory_block **memory,
-                             struct mem_section *section, unsigned long state)
+                             unsigned long block_id, unsigned long state)
 {
         struct memory_block *mem;
         unsigned long start_pfn;
-        int scn_nr;
         int ret = 0;
 
+        mem = find_memory_block_by_id(block_id);
+        if (mem) {
+                put_device(&mem->dev);
+                return -EEXIST;
+        }
         mem = kzalloc(sizeof(*mem), GFP_KERNEL);
         if (!mem)
                 return -ENOMEM;
 
-        scn_nr = __section_nr(section);
-        mem->start_section_nr =
-                        base_memory_block_id(scn_nr) * sections_per_block;
+        mem->start_section_nr = block_id * sections_per_block;
         mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
         mem->state = state;
         start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -677,97 +681,101 @@ static int init_memory_block(struct memory_block **memory,
         return ret;
 }
 
-static int add_memory_block(int base_section_nr)
+static int add_memory_block(unsigned long base_section_nr)
 {
+        int ret, section_count = 0;
         struct memory_block *mem;
-        int i, ret, section_count = 0, section_nr;
+        unsigned long nr;
 
-        for (i = base_section_nr;
-             i < base_section_nr + sections_per_block;
-             i++) {
-                if (!present_section_nr(i))
-                        continue;
-                if (section_count == 0)
-                        section_nr = i;
-                section_count++;
-        }
+        for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
+             nr++)
+                if (present_section_nr(nr))
+                        section_count++;
 
         if (section_count == 0)
                 return 0;
-        ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+        ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
+                                MEM_ONLINE);
         if (ret)
                 return ret;
         mem->section_count = section_count;
         return 0;
 }
 
+static void unregister_memory(struct memory_block *memory)
+{
+        if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
+                return;
+
+        /* drop the ref. we got via find_memory_block() */
+        put_device(&memory->dev);
+        device_unregister(&memory->dev);
+}
+
 /*
- * need an interface for the VM to add new memory regions,
- * but without onlining it.
+ * Create memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * will be initialized as offline.
  */
-int hotplug_memory_register(int nid, struct mem_section *section)
+int create_memory_block_devices(unsigned long start, unsigned long size)
 {
-        int ret = 0;
+        const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+        unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
         struct memory_block *mem;
+        unsigned long block_id;
+        int ret = 0;
 
-        mutex_lock(&mem_sysfs_mutex);
+        if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+                         !IS_ALIGNED(size, memory_block_size_bytes())))
+                return -EINVAL;
 
-        mem = find_memory_block(section);
-        if (mem) {
-                mem->section_count++;
-                put_device(&mem->dev);
-        } else {
-                ret = init_memory_block(&mem, section, MEM_OFFLINE);
+        mutex_lock(&mem_sysfs_mutex);
+        for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+                ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
                 if (ret)
-                        goto out;
-                mem->section_count++;
+                        break;
+                mem->section_count = sections_per_block;
+        }
+        if (ret) {
+                end_block_id = block_id;
+                for (block_id = start_block_id; block_id != end_block_id;
+                     block_id++) {
+                        mem = find_memory_block_by_id(block_id);
+                        mem->section_count = 0;
+                        unregister_memory(mem);
+                }
         }
-
-out:
         mutex_unlock(&mem_sysfs_mutex);
         return ret;
 }
 
-#ifdef CONFIG_MEMORY_HOTREMOVE
-static void
-unregister_memory(struct memory_block *memory)
-{
-        BUG_ON(memory->dev.bus != &memory_subsys);
-
-        /* drop the ref. we got via find_memory_block() */
-        put_device(&memory->dev);
-        device_unregister(&memory->dev);
-}
-
-void unregister_memory_section(struct mem_section *section)
+/*
+ * Remove memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * have to be offline.
+ */
+void remove_memory_block_devices(unsigned long start, unsigned long size)
 {
+        const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+        const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
         struct memory_block *mem;
+        unsigned long block_id;
 
-        if (WARN_ON_ONCE(!present_section(section)))
+        if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+                         !IS_ALIGNED(size, memory_block_size_bytes())))
                 return;
 
         mutex_lock(&mem_sysfs_mutex);
-
-        /*
-         * Some users of the memory hotplug do not want/need memblock to
-         * track all sections. Skip over those.
-         */
-        mem = find_memory_block(section);
-        if (!mem)
-                goto out_unlock;
-
-        unregister_mem_sect_under_nodes(mem, __section_nr(section));
-
-        mem->section_count--;
-        if (mem->section_count == 0)
+        for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+                mem = find_memory_block_by_id(block_id);
+                if (WARN_ON_ONCE(!mem))
+                        continue;
+                mem->section_count = 0;
+                unregister_memory_block_under_nodes(mem);
                 unregister_memory(mem);
-        else
-                put_device(&mem->dev);
-
-out_unlock:
+        }
         mutex_unlock(&mem_sysfs_mutex);
 }
-#endif /* CONFIG_MEMORY_HOTREMOVE */
 
 /* return true if the memory block is offlined, otherwise, return false */
 bool is_memblock_offlined(struct memory_block *mem)
@@ -804,10 +812,9 @@ static const struct attribute_group *memory_root_attr_groups[] = {
  */
 int __init memory_dev_init(void)
 {
-        unsigned int i;
         int ret;
         int err;
-        unsigned long block_sz;
+        unsigned long block_sz, nr;
 
         ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
         if (ret)
@@ -821,9 +828,9 @@ int __init memory_dev_init(void)
          * during boot and have been initialized
          */
         mutex_lock(&mem_sysfs_mutex);
-        for (i = 0; i <= __highest_present_section_nr;
-                i += sections_per_block) {
-                err = add_memory_block(i);
+        for (nr = 0; nr <= __highest_present_section_nr;
+             nr += sections_per_block) {
+                err = add_memory_block(nr);
                 if (!ret)
                         ret = err;
         }
@@ -834,3 +841,43 @@ out:
                 printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
         return ret;
 }
+
+/**
+ * walk_memory_blocks - walk through all present memory blocks overlapped
+ *                      by the range [start, start + size)
+ *
+ * @start: start address of the memory range
+ * @size: size of the memory range
+ * @arg: argument passed to func
+ * @func: callback for each memory section walked
+ *
+ * This function walks through all present memory blocks overlapped by the
+ * range [start, start + size), calling func on each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int walk_memory_blocks(unsigned long start, unsigned long size,
+                       void *arg, walk_memory_blocks_func_t func)
+{
+        const unsigned long start_block_id = phys_to_block_id(start);
+        const unsigned long end_block_id = phys_to_block_id(start + size - 1);
+        struct memory_block *mem;
+        unsigned long block_id;
+        int ret = 0;
+
+        if (!size)
+                return 0;
+
+        for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+                mem = find_memory_block_by_id(block_id);
+                if (!mem)
+                        continue;
+
+                ret = func(mem, arg);
+                put_device(&mem->dev);
+                if (ret)
+                        break;
+        }
+        return ret;
+}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index aa878fbcf705..75b7e6f6535b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -753,7 +753,8 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 }
 
 /* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+static int register_mem_sect_under_node(struct memory_block *mem_blk,
+                                        void *arg)
 {
         int ret, nid = *(int *)arg;
         unsigned long pfn, sect_start_pfn, sect_end_pfn;
@@ -802,23 +803,18 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
         return 0;
 }
 
-/* unregister memory section under all nodes that it spans */
-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
-                                    unsigned long phys_index)
+/*
+ * Unregister memory block device under all nodes that it spans.
+ * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
+ */
+void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 {
-        NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
         unsigned long pfn, sect_start_pfn, sect_end_pfn;
+        static nodemask_t unlinked_nodes;
 
-        if (!mem_blk) {
-                NODEMASK_FREE(unlinked_nodes);
-                return -EFAULT;
-        }
-        if (!unlinked_nodes)
-                return -ENOMEM;
-        nodes_clear(*unlinked_nodes);
-
-        sect_start_pfn = section_nr_to_pfn(phys_index);
-        sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
+        nodes_clear(unlinked_nodes);
+        sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+        sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
         for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
                 int nid;
 
@@ -827,21 +823,20 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
                         continue;
                 if (!node_online(nid))
                         continue;
-                if (node_test_and_set(nid, *unlinked_nodes))
+                if (node_test_and_set(nid, unlinked_nodes))
                         continue;
                 sysfs_remove_link(&node_devices[nid]->dev.kobj,
                          kobject_name(&mem_blk->dev.kobj));
                 sysfs_remove_link(&mem_blk->dev.kobj,
                          kobject_name(&node_devices[nid]->dev.kobj));
         }
-        NODEMASK_FREE(unlinked_nodes);
-        return 0;
 }
 
 int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
 {
-        return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
-                                 register_mem_sect_under_node);
+        return walk_memory_blocks(PFN_PHYS(start_pfn),
+                                  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
+                                  register_mem_sect_under_node);
 }
 
 #ifdef CONFIG_HUGETLBFS
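A note on the fallback_table.c hunk above: the per-file "static unsigned int zero" and "one" bounds are replaced by the shared SYSCTL_ZERO and SYSCTL_ONE constants from <linux/sysctl.h>, and the table is now only built under CONFIG_SYSCTL. The sketch below shows the same bounded-boolean pattern for a hypothetical sysctl; example_flag and example_table are made-up names, not part of this patch.

/*
 * Hypothetical example of the pattern used above: a 0/1 sysctl bounded via
 * the shared SYSCTL_ZERO/SYSCTL_ONE constants instead of per-file statics.
 */
static unsigned int example_flag;

static struct ctl_table example_table[] = {
        {
                .procname       = "example_flag",
                .data           = &example_flag,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_douintvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        { }
};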
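The new create_memory_block_devices() and remove_memory_block_devices() interfaces in memory.c expect a range aligned to memory_block_size_bytes() and create the block devices in the offline state; the actual callers live in mm/memory_hotplug.c. The following is only a minimal sketch of that contract, and example_add_block_devices() is a hypothetical function, not part of the patch.

/*
 * Hypothetical caller sketch: memory block devices can only be created for
 * a range aligned to the memory block size, and they start out offline.
 */
static int example_add_block_devices(unsigned long start, unsigned long size)
{
        if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
            !IS_ALIGNED(size, memory_block_size_bytes()))
                return -EINVAL;

        /* creates /sys/devices/system/memory/memoryX devices, all offline */
        return create_memory_block_devices(start, size);
}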
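walk_memory_blocks() invokes a walk_memory_blocks_func_t callback, i.e. int (*)(struct memory_block *, void *), on every present memory block overlapping a physical range, taking and dropping a device reference around each call, which is how link_mem_sections() in node.c now drives register_mem_sect_under_node(). Below is a hedged sketch of another possible user, assuming the declarations from <linux/memory.h>; count_online_blocks() and online_blocks_in_range() are illustrative names only.

/* Hypothetical callback: count the memory blocks that are currently online. */
static int count_online_blocks(struct memory_block *mem, void *arg)
{
        unsigned int *count = arg;

        if (mem->state == MEM_ONLINE)
                (*count)++;
        return 0;       /* a non-zero return would abort the walk */
}

/* Count online memory blocks overlapping the physical range [start, start + size). */
static unsigned int online_blocks_in_range(unsigned long start,
                                           unsigned long size)
{
        unsigned int count = 0;

        walk_memory_blocks(start, size, &count, count_online_blocks);
        return count;
}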