Diffstat (limited to 'drivers/acpi')
33 files changed, 2959 insertions, 776 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 2b652f1d4edb..114cf48085ab 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -54,6 +54,9 @@ config ACPI_GENERIC_GSI config ACPI_SYSTEM_POWER_STATES_SUPPORT bool +config ACPI_CCA_REQUIRED + bool + config ACPI_SLEEP bool depends on SUSPEND || HIBERNATION @@ -62,7 +65,7 @@ config ACPI_SLEEP config ACPI_PROCFS_POWER bool "Deprecated power /proc/acpi directories" - depends on PROC_FS + depends on X86 && PROC_FS help For backwards compatibility, this option allows deprecated power /proc/acpi/ directories to exist, even when @@ -403,6 +406,32 @@ config ACPI_REDUCED_HARDWARE_ONLY If you are unsure what to do, do not enable this option. +config ACPI_NFIT + tristate "ACPI NVDIMM Firmware Interface Table (NFIT)" + depends on PHYS_ADDR_T_64BIT + depends on BLK_DEV + select LIBNVDIMM + help + Infrastructure to probe ACPI 6 compliant platforms for + NVDIMMs (NFIT) and register a libnvdimm device tree. In + addition to storage devices this also enables libnvdimm to pass + ACPI._DSM messages for platform/dimm configuration. + + To compile this driver as a module, choose M here: + the module will be called nfit. + +config ACPI_NFIT_DEBUG + bool "NFIT DSM debug" + depends on ACPI_NFIT + depends on DYNAMIC_DEBUG + default n + help + Enabling this option causes the nfit driver to dump the + input and output buffers of _DSM operations on the ACPI0012 + device and its children. This can be very verbose, so leave + it disabled unless you are debugging a hardware / firmware + issue. + source "drivers/acpi/apei/Kconfig" config ACPI_EXTLOG diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 8a063e276530..8321430d7f24 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -52,9 +52,6 @@ acpi-$(CONFIG_X86) += acpi_cmos_rtc.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o -ifdef CONFIG_ACPI_VIDEO -acpi-y += video_detect.o -endif acpi-y += acpi_lpat.o acpi-$(CONFIG_ACPI_GENERIC_GSI) += gsi.o @@ -71,6 +68,7 @@ obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-y += container.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o +obj-$(CONFIG_ACPI_NFIT) += nfit.o obj-y += acpi_memhotplug.o obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o obj-$(CONFIG_ACPI_BATTERY) += battery.o @@ -95,3 +93,5 @@ obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o + +video-objs += acpi_video.o video_detect.o diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index bbcc2b5a70d4..9b5354a2cd08 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -308,7 +308,7 @@ static int thinkpad_e530_quirk(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id ac_dmi_table[] = { +static const struct dmi_system_id ac_dmi_table[] = { { .callback = thinkpad_e530_quirk, .ident = "thinkpad e530", diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 37fb19047603..569ee090343f 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -129,50 +129,50 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); } -static struct lpss_device_desc lpt_dev_desc = { +static const struct lpss_device_desc lpt_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .prv_offset = 0x800, }; -static struct 
lpss_device_desc lpt_i2c_dev_desc = { +static const struct lpss_device_desc lpt_i2c_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, .prv_offset = 0x800, }; -static struct lpss_device_desc lpt_uart_dev_desc = { +static const struct lpss_device_desc lpt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; -static struct lpss_device_desc lpt_sdio_dev_desc = { +static const struct lpss_device_desc lpt_sdio_dev_desc = { .flags = LPSS_LTR, .prv_offset = 0x1000, .prv_size_override = 0x1018, }; -static struct lpss_device_desc byt_pwm_dev_desc = { +static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, }; -static struct lpss_device_desc byt_uart_dev_desc = { +static const struct lpss_device_desc byt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; -static struct lpss_device_desc byt_spi_dev_desc = { +static const struct lpss_device_desc byt_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, }; -static struct lpss_device_desc byt_sdio_dev_desc = { +static const struct lpss_device_desc byt_sdio_dev_desc = { .flags = LPSS_CLK, }; -static struct lpss_device_desc byt_i2c_dev_desc = { +static const struct lpss_device_desc byt_i2c_dev_desc = { .flags = LPSS_CLK | LPSS_SAVE_CTX, .prv_offset = 0x800, .setup = byt_i2c_setup, @@ -323,14 +323,14 @@ out: static int acpi_lpss_create_device(struct acpi_device *adev, const struct acpi_device_id *id) { - struct lpss_device_desc *dev_desc; + const struct lpss_device_desc *dev_desc; struct lpss_private_data *pdata; struct resource_entry *rentry; struct list_head resource_list; struct platform_device *pdev; int ret; - dev_desc = (struct lpss_device_desc *)id->driver_data; + dev_desc = (const struct lpss_device_desc *)id->driver_data; if (!dev_desc) { pdev = acpi_create_platform_device(adev); return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 6bc9cbc01ad6..00b39802d7ec 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -105,7 +105,7 @@ static void round_robin_cpu(unsigned int tsk_index) mutex_lock(&round_robin_lock); cpumask_clear(tmp); for_each_cpu(cpu, pad_busy_cpus) - cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); + cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu)); cpumask_andnot(tmp, cpu_online_mask, tmp); /* avoid HT sibilings if possible */ if (cpumask_empty(tmp)) diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 4bf75597f732..06a67d5f2846 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -103,7 +103,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) pdevinfo.res = resources; pdevinfo.num_res = count; pdevinfo.fwnode = acpi_fwnode_handle(adev); - pdevinfo.dma_mask = DMA_BIT_MASK(32); + pdevinfo.dma_mask = acpi_check_dma(adev, NULL) ? 
DMA_BIT_MASK(32) : 0; pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) dev_err(&adev->dev, "platform device creation failed: %ld\n", diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 58f335ca2e75..92a5f738e370 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) acpi_status status; int ret; - if (pr->phys_id == PHYS_CPUID_INVALID) + if (invalid_phys_cpuid(pr->phys_id)) return -ENODEV; status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); @@ -215,8 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device) union acpi_object object = { 0 }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; struct acpi_processor *pr = acpi_driver_data(device); - phys_cpuid_t phys_id; - int cpu_index, device_declaration = 0; + int device_declaration = 0; acpi_status status = AE_OK; static int cpu0_initialized; unsigned long long value; @@ -263,29 +262,28 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->acpi_id = value; } - phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); - if (phys_id == PHYS_CPUID_INVALID) + pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, + pr->acpi_id); + if (invalid_phys_cpuid(pr->phys_id)) acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); - pr->phys_id = phys_id; - cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id); + pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id); if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { cpu0_initialized = 1; /* * Handle UP system running SMP kernel, with no CPU * entry in MADT */ - if ((cpu_index == -1) && (num_online_cpus() == 1)) - cpu_index = 0; + if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1)) + pr->id = 0; } - pr->id = cpu_index; /* * Extra Processor objects may be enumerated on MP systems with * less than the max # of CPUs. They should be ignored _iff * they are physically not present. */ - if (pr->id == -1) { + if (invalid_logical_cpuid(pr->id)) { int ret = acpi_processor_hotadd_init(pr); if (ret) return ret; diff --git a/drivers/acpi/video.c b/drivers/acpi/acpi_video.c index cc79d3fedfb2..8c2fe2f2f9fd 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/acpi_video.c @@ -43,7 +43,7 @@ #include <acpi/video.h> #include <asm/uaccess.h> -#include "internal.h" +#define PREFIX "ACPI: " #define ACPI_VIDEO_BUS_NAME "Video Bus" #define ACPI_VIDEO_DEVICE_NAME "Video Device" @@ -78,26 +78,17 @@ module_param(brightness_switch_enabled, bool, 0644); static bool allow_duplicates; module_param(allow_duplicates, bool, 0644); -/* - * For Windows 8 systems: used to decide if video module - * should skip registering backlight interface of its own. 
- */ -enum { - NATIVE_BACKLIGHT_NOT_SET = -1, - NATIVE_BACKLIGHT_OFF, - NATIVE_BACKLIGHT_ON, -}; - -static int use_native_backlight_param = NATIVE_BACKLIGHT_NOT_SET; -module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); -static int use_native_backlight_dmi = NATIVE_BACKLIGHT_NOT_SET; +static int disable_backlight_sysfs_if = -1; +module_param(disable_backlight_sysfs_if, int, 0444); static int register_count; +static DEFINE_MUTEX(register_count_mutex); static struct mutex video_list_lock; static struct list_head video_bus_head; static int acpi_video_bus_add(struct acpi_device *device); static int acpi_video_bus_remove(struct acpi_device *device); static void acpi_video_bus_notify(struct acpi_device *device, u32 event); +void acpi_video_detect_exit(void); static const struct acpi_device_id video_device_ids[] = { {ACPI_VIDEO_HID, 0}, @@ -157,7 +148,6 @@ struct acpi_video_enumerated_device { struct acpi_video_bus { struct acpi_device *device; bool backlight_registered; - bool backlight_notifier_registered; u8 dos_setting; struct acpi_video_enumerated_device *attached_array; u8 attached_count; @@ -170,7 +160,6 @@ struct acpi_video_bus { struct input_dev *input; char phys[32]; /* for input device */ struct notifier_block pm_nb; - struct notifier_block backlight_nb; }; struct acpi_video_device_flags { @@ -241,24 +230,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device, u32 level_current, u32 event); static void acpi_video_switch_brightness(struct work_struct *work); -static bool acpi_video_use_native_backlight(void) -{ - if (use_native_backlight_param != NATIVE_BACKLIGHT_NOT_SET) - return use_native_backlight_param; - else if (use_native_backlight_dmi != NATIVE_BACKLIGHT_NOT_SET) - return use_native_backlight_dmi; - return acpi_osi_is_win8(); -} - -bool acpi_video_verify_backlight_support(void) -{ - if (acpi_video_use_native_backlight() && - backlight_device_registered(BACKLIGHT_RAW)) - return false; - return acpi_video_backlight_support(); -} -EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support); - /* backlight device sysfs support */ static int acpi_video_get_brightness(struct backlight_device *bd) { @@ -413,25 +384,21 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) */ static int bqc_offset_aml_bug_workaround; -static int __init video_set_bqc_offset(const struct dmi_system_id *d) +static int video_set_bqc_offset(const struct dmi_system_id *d) { bqc_offset_aml_bug_workaround = 9; return 0; } -static int __init video_disable_native_backlight(const struct dmi_system_id *d) -{ - use_native_backlight_dmi = NATIVE_BACKLIGHT_OFF; - return 0; -} - -static int __init video_enable_native_backlight(const struct dmi_system_id *d) +static int video_disable_backlight_sysfs_if( + const struct dmi_system_id *d) { - use_native_backlight_dmi = NATIVE_BACKLIGHT_ON; + if (disable_backlight_sysfs_if == -1) + disable_backlight_sysfs_if = 1; return 0; } -static struct dmi_system_id video_dmi_table[] __initdata = { +static struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 */ @@ -477,110 +444,19 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, /* - * These models have a working acpi_video backlight control, and using - * native backlight causes a regression where backlight does not work - * when userspace is not handling brightness key events. 
Disable - * native_backlight on these to fix this: - * https://bugzilla.kernel.org/show_bug.cgi?id=81691 + * Some machines have a broken acpi-video interface for brightness + * control, but still need an acpi_video_device_lcd_set_level() call + * on resume to turn the backlight power on. We Enable backlight + * control on these systems, but do not register a backlight sysfs + * as brightness control does not work. */ { - .callback = video_disable_native_backlight, - .ident = "ThinkPad T420", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"), - }, - }, - { - .callback = video_disable_native_backlight, - .ident = "ThinkPad T520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), - }, - }, - { - .callback = video_disable_native_backlight, - .ident = "ThinkPad X201s", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), - }, - }, - - /* The native backlight controls do not work on some older machines */ - { - /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */ - .callback = video_disable_native_backlight, - .ident = "HP ENVY 15 Notebook", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), - }, - }, - - { - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"), - }, - }, - { - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), - }, - }, - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */ - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "3570R/370R/470R/450R/510R/4450RV"), - }, - }, - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 730U3E/740U3E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), - }, - }, - { - /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */ - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"), - }, - }, - - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ - .callback = video_disable_native_backlight, - .ident = "Dell XPS15 L521X", + /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */ + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Portege R830", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), - }, - }, - - /* Non win8 machines which need native backlight nevertheless */ - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */ - .callback = video_enable_native_backlight, - .ident = "Lenovo Ideapad Z570", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "102434U"), + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE 
R830"), }, }, {} @@ -1382,7 +1258,7 @@ acpi_video_switch_brightness(struct work_struct *work) int result = -EINVAL; /* no warning message if acpi_backlight=vendor or a quirk is used */ - if (!acpi_video_verify_backlight_support()) + if (!device->backlight) return; if (!device->brightness) @@ -1657,8 +1533,9 @@ static int acpi_video_resume(struct notifier_block *nb, for (i = 0; i < video->attached_count; i++) { video_device = video->attached_array[i].bind_info; - if (video_device && video_device->backlight) - acpi_video_set_brightness(video_device->backlight); + if (video_device && video_device->brightness) + acpi_video_device_lcd_set_level(video_device, + video_device->brightness->curr); } return NOTIFY_OK; @@ -1707,6 +1584,10 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) result = acpi_video_init_brightness(device); if (result) return; + + if (disable_backlight_sysfs_if > 0) + return; + name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; @@ -1729,8 +1610,10 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) &acpi_backlight_ops, &props); kfree(name); - if (IS_ERR(device->backlight)) + if (IS_ERR(device->backlight)) { + device->backlight = NULL; return; + } /* * Save current brightness level in case we have to restore it @@ -1787,7 +1670,7 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) acpi_video_run_bcl_for_osi(video); - if (!acpi_video_verify_backlight_support()) + if (acpi_video_get_backlight_type() != acpi_backlight_video) return 0; mutex_lock(&video->device_list_lock); @@ -1931,56 +1814,6 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video) video->input = NULL; } -static int acpi_video_backlight_notify(struct notifier_block *nb, - unsigned long val, void *bd) -{ - struct backlight_device *backlight = bd; - struct acpi_video_bus *video; - - /* acpi_video_verify_backlight_support only cares about raw devices */ - if (backlight->props.type != BACKLIGHT_RAW) - return NOTIFY_DONE; - - video = container_of(nb, struct acpi_video_bus, backlight_nb); - - switch (val) { - case BACKLIGHT_REGISTERED: - if (!acpi_video_verify_backlight_support()) - acpi_video_bus_unregister_backlight(video); - break; - case BACKLIGHT_UNREGISTERED: - acpi_video_bus_register_backlight(video); - break; - } - - return NOTIFY_OK; -} - -static int acpi_video_bus_add_backlight_notify_handler( - struct acpi_video_bus *video) -{ - int error; - - video->backlight_nb.notifier_call = acpi_video_backlight_notify; - video->backlight_nb.priority = 0; - error = backlight_register_notifier(&video->backlight_nb); - if (error == 0) - video->backlight_notifier_registered = true; - - return error; -} - -static int acpi_video_bus_remove_backlight_notify_handler( - struct acpi_video_bus *video) -{ - if (!video->backlight_notifier_registered) - return 0; - - video->backlight_notifier_registered = false; - - return backlight_unregister_notifier(&video->backlight_nb); -} - static int acpi_video_bus_put_devices(struct acpi_video_bus *video) { struct acpi_video_device *dev, *next; @@ -2062,7 +1895,6 @@ static int acpi_video_bus_add(struct acpi_device *device) acpi_video_bus_register_backlight(video); acpi_video_bus_add_notify_handler(video); - acpi_video_bus_add_backlight_notify_handler(video); return 0; @@ -2086,7 +1918,6 @@ static int acpi_video_bus_remove(struct acpi_device *device) video = acpi_driver_data(device); - acpi_video_bus_remove_backlight_notify_handler(video); 
acpi_video_bus_remove_notify_handler(video); acpi_video_bus_unregister_backlight(video); acpi_video_bus_put_devices(video); @@ -2134,22 +1965,25 @@ static int __init intel_opregion_present(void) int acpi_video_register(void) { - int ret; + int ret = 0; + mutex_lock(®ister_count_mutex); if (register_count) { /* * if the function of acpi_video_register is already called, * don't register the acpi_vide_bus again and return no error. */ - return 0; + goto leave; } mutex_init(&video_list_lock); INIT_LIST_HEAD(&video_bus_head); + dmi_check_system(video_dmi_table); + ret = acpi_bus_register_driver(&acpi_video_bus); if (ret) - return ret; + goto leave; /* * When the acpi_video_bus is loaded successfully, increase @@ -2157,24 +1991,20 @@ int acpi_video_register(void) */ register_count = 1; - return 0; +leave: + mutex_unlock(®ister_count_mutex); + return ret; } EXPORT_SYMBOL(acpi_video_register); void acpi_video_unregister(void) { - if (!register_count) { - /* - * If the acpi video bus is already unloaded, don't - * unload it again and return directly. - */ - return; + mutex_lock(®ister_count_mutex); + if (register_count) { + acpi_bus_unregister_driver(&acpi_video_bus); + register_count = 0; } - acpi_bus_unregister_driver(&acpi_video_bus); - - register_count = 0; - - return; + mutex_unlock(®ister_count_mutex); } EXPORT_SYMBOL(acpi_video_unregister); @@ -2182,15 +2012,15 @@ void acpi_video_unregister_backlight(void) { struct acpi_video_bus *video; - if (!register_count) - return; - - mutex_lock(&video_list_lock); - list_for_each_entry(video, &video_bus_head, entry) - acpi_video_bus_unregister_backlight(video); - mutex_unlock(&video_list_lock); + mutex_lock(®ister_count_mutex); + if (register_count) { + mutex_lock(&video_list_lock); + list_for_each_entry(video, &video_bus_head, entry) + acpi_video_bus_unregister_backlight(video); + mutex_unlock(&video_list_lock); + } + mutex_unlock(®ister_count_mutex); } -EXPORT_SYMBOL(acpi_video_unregister_backlight); /* * This is kind of nasty. Hardware using Intel chipsets may require @@ -2212,8 +2042,6 @@ static int __init acpi_video_init(void) if (acpi_disabled) return 0; - dmi_check_system(video_dmi_table); - if (intel_opregion_present()) return 0; @@ -2222,6 +2050,7 @@ static int __init acpi_video_init(void) static void __exit acpi_video_exit(void) { + acpi_video_detect_exit(); acpi_video_unregister(); return; diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index ed65e9c4b5b0..3670bbab57a3 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -35,6 +35,7 @@ #include <linux/nmi.h> #include <linux/hardirq.h> #include <linux/pstore.h> +#include <linux/vmalloc.h> #include <acpi/apei.h> #include "apei-internal.h" diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index e82d0976a5d0..2bfd53cbfe80 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -729,10 +729,10 @@ static struct llist_head ghes_estatus_llist; static struct irq_work ghes_proc_irq_work; /* - * NMI may be triggered on any CPU, so ghes_nmi_lock is used for - * mutual exclusion. + * NMI may be triggered on any CPU, so ghes_in_nmi is used for + * having only one concurrent reader. 
*/ -static DEFINE_RAW_SPINLOCK(ghes_nmi_lock); +static atomic_t ghes_in_nmi = ATOMIC_INIT(0); static LIST_HEAD(ghes_nmi); @@ -797,73 +797,75 @@ static void ghes_print_queued_estatus(void) } } +/* Save estatus for further processing in IRQ context */ +static void __process_error(struct ghes *ghes) +{ +#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + u32 len, node_len; + struct ghes_estatus_node *estatus_node; + struct acpi_hest_generic_status *estatus; + + if (ghes_estatus_cached(ghes->estatus)) + return; + + len = cper_estatus_len(ghes->estatus); + node_len = GHES_ESTATUS_NODE_LEN(len); + + estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len); + if (!estatus_node) + return; + + estatus_node->ghes = ghes; + estatus_node->generic = ghes->generic; + estatus = GHES_ESTATUS_FROM_NODE(estatus_node); + memcpy(estatus, ghes->estatus, len); + llist_add(&estatus_node->llnode, &ghes_estatus_llist); +#endif +} + +static void __ghes_panic(struct ghes *ghes) +{ + oops_begin(); + ghes_print_queued_estatus(); + __ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus); + + /* reboot to log the error! */ + if (panic_timeout == 0) + panic_timeout = ghes_panic_timeout; + panic("Fatal hardware error!"); +} + static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) { - struct ghes *ghes, *ghes_global = NULL; - int sev, sev_global = -1; - int ret = NMI_DONE; + struct ghes *ghes; + int sev, ret = NMI_DONE; + + if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) + return ret; - raw_spin_lock(&ghes_nmi_lock); list_for_each_entry_rcu(ghes, &ghes_nmi, list) { if (ghes_read_estatus(ghes, 1)) { ghes_clear_estatus(ghes); continue; } - sev = ghes_severity(ghes->estatus->error_severity); - if (sev > sev_global) { - sev_global = sev; - ghes_global = ghes; - } - ret = NMI_HANDLED; - } - - if (ret == NMI_DONE) - goto out; - if (sev_global >= GHES_SEV_PANIC) { - oops_begin(); - ghes_print_queued_estatus(); - __ghes_print_estatus(KERN_EMERG, ghes_global->generic, - ghes_global->estatus); - /* reboot to log the error! 
*/ - if (panic_timeout == 0) - panic_timeout = ghes_panic_timeout; - panic("Fatal hardware error!"); - } + sev = ghes_severity(ghes->estatus->error_severity); + if (sev >= GHES_SEV_PANIC) + __ghes_panic(ghes); - list_for_each_entry_rcu(ghes, &ghes_nmi, list) { -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - u32 len, node_len; - struct ghes_estatus_node *estatus_node; - struct acpi_hest_generic_status *estatus; -#endif if (!(ghes->flags & GHES_TO_CLEAR)) continue; -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - if (ghes_estatus_cached(ghes->estatus)) - goto next; - /* Save estatus for further processing in IRQ context */ - len = cper_estatus_len(ghes->estatus); - node_len = GHES_ESTATUS_NODE_LEN(len); - estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, - node_len); - if (estatus_node) { - estatus_node->ghes = ghes; - estatus_node->generic = ghes->generic; - estatus = GHES_ESTATUS_FROM_NODE(estatus_node); - memcpy(estatus, ghes->estatus, len); - llist_add(&estatus_node->llnode, &ghes_estatus_llist); - } -next: -#endif + + __process_error(ghes); ghes_clear_estatus(ghes); + + ret = NMI_HANDLED; } + #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG irq_work_queue(&ghes_proc_irq_work); #endif - -out: - raw_spin_unlock(&ghes_nmi_lock); + atomic_dec(&ghes_in_nmi); return ret; } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 63d43677f644..b3628cc01a53 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -70,6 +70,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>"); MODULE_DESCRIPTION("ACPI Battery Driver"); MODULE_LICENSE("GPL"); +static async_cookie_t async_cookie; static int battery_bix_broken_package; static int battery_notification_delay_ms; static unsigned int cache_time = 1000; @@ -338,14 +339,6 @@ static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -#ifdef CONFIG_ACPI_PROCFS_POWER -inline char *acpi_battery_units(struct acpi_battery *battery) -{ - return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? - "mA" : "mW"; -} -#endif - /* -------------------------------------------------------------------------- Battery Management -------------------------------------------------------------------------- */ @@ -354,14 +347,14 @@ struct acpi_offsets { u8 mode; /* int or string? 
*/ }; -static struct acpi_offsets state_offsets[] = { +static const struct acpi_offsets state_offsets[] = { {offsetof(struct acpi_battery, state), 0}, {offsetof(struct acpi_battery, rate_now), 0}, {offsetof(struct acpi_battery, capacity_now), 0}, {offsetof(struct acpi_battery, voltage_now), 0}, }; -static struct acpi_offsets info_offsets[] = { +static const struct acpi_offsets info_offsets[] = { {offsetof(struct acpi_battery, power_unit), 0}, {offsetof(struct acpi_battery, design_capacity), 0}, {offsetof(struct acpi_battery, full_charge_capacity), 0}, @@ -377,7 +370,7 @@ static struct acpi_offsets info_offsets[] = { {offsetof(struct acpi_battery, oem_info), 1}, }; -static struct acpi_offsets extended_info_offsets[] = { +static const struct acpi_offsets extended_info_offsets[] = { {offsetof(struct acpi_battery, revision), 0}, {offsetof(struct acpi_battery, power_unit), 0}, {offsetof(struct acpi_battery, design_capacity), 0}, @@ -402,7 +395,7 @@ static struct acpi_offsets extended_info_offsets[] = { static int extract_package(struct acpi_battery *battery, union acpi_object *package, - struct acpi_offsets *offsets, int num) + const struct acpi_offsets *offsets, int num) { int i; union acpi_object *element; @@ -792,6 +785,12 @@ static void acpi_battery_refresh(struct acpi_battery *battery) #ifdef CONFIG_ACPI_PROCFS_POWER static struct proc_dir_entry *acpi_battery_dir; +static const char *acpi_battery_units(const struct acpi_battery *battery) +{ + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? + "mA" : "mW"; +} + static int acpi_battery_print_info(struct seq_file *seq, int result) { struct acpi_battery *battery = seq->private; @@ -1125,19 +1124,21 @@ static int battery_notify(struct notifier_block *nb, return 0; } -static int battery_bix_broken_package_quirk(const struct dmi_system_id *d) +static int __init +battery_bix_broken_package_quirk(const struct dmi_system_id *d) { battery_bix_broken_package = 1; return 0; } -static int battery_notification_delay_quirk(const struct dmi_system_id *d) +static int __init +battery_notification_delay_quirk(const struct dmi_system_id *d) { battery_notification_delay_ms = 1000; return 0; } -static struct dmi_system_id bat_dmi_table[] = { +static const struct dmi_system_id bat_dmi_table[] __initconst = { { .callback = battery_bix_broken_package_quirk, .ident = "NEC LZ750/LS", @@ -1292,33 +1293,34 @@ static struct acpi_driver acpi_battery_driver = { static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) { - if (acpi_disabled) - return; + int result; dmi_check_system(bat_dmi_table); - + #ifdef CONFIG_ACPI_PROCFS_POWER acpi_battery_dir = acpi_lock_battery_dir(); if (!acpi_battery_dir) return; #endif - if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { + result = acpi_bus_register_driver(&acpi_battery_driver); #ifdef CONFIG_ACPI_PROCFS_POWER + if (result < 0) acpi_unlock_battery_dir(acpi_battery_dir); #endif - return; - } - return; } static int __init acpi_battery_init(void) { - async_schedule(acpi_battery_init_async, NULL); + if (acpi_disabled) + return -ENODEV; + + async_cookie = async_schedule(acpi_battery_init_async, NULL); return 0; } static void __exit acpi_battery_exit(void) { + async_synchronize_cookie(async_cookie); acpi_bus_unregister_driver(&acpi_battery_driver); #ifdef CONFIG_ACPI_PROCFS_POWER acpi_unlock_battery_dir(acpi_battery_dir); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index c412fdb28d34..513e7230e3d0 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -470,6 +470,16 @@ static int 
__init acpi_bus_init_irq(void) return 0; } +/** + * acpi_early_init - Initialize ACPICA and populate the ACPI namespace. + * + * The ACPI tables are accessible after this, but the handling of events has not + * been initialized and the global lock is not available yet, so AML should not + * be executed at this point. + * + * Doing this before switching the EFI runtime services to virtual mode allows + * the EfiBootServices memory to be freed slightly earlier on boot. + */ void __init acpi_early_init(void) { acpi_status status; @@ -533,26 +543,42 @@ void __init acpi_early_init(void) acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi; } #endif + return; + + error0: + disable_acpi(); +} + +/** + * acpi_subsystem_init - Finalize the early initialization of ACPI. + * + * Switch over the platform to the ACPI mode (if possible), initialize the + * handling of ACPI events, install the interrupt and global lock handlers. + * + * Doing this too early is generally unsafe, but at the same time it needs to be + * done before all things that really depend on ACPI. The right spot appears to + * be before finalizing the EFI initialization. + */ +void __init acpi_subsystem_init(void) +{ + acpi_status status; + + if (acpi_disabled) + return; status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); - goto error0; + disable_acpi(); + } else { + /* + * If the system is using ACPI then we can be reasonably + * confident that any regulators are managed by the firmware + * so tell the regulator core it has everything it needs to + * know. + */ + regulator_has_full_constraints(); } - - /* - * If the system is using ACPI then we can be reasonably - * confident that any regulators are managed by the firmware - * so tell the regulator core it has everything it needs to - * know. - */ - regulator_has_full_constraints(); - - return; - - error0: - disable_acpi(); - return; } static int __init acpi_bus_init(void) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 735db11a9b00..717afcdb5f4a 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -98,17 +98,16 @@ int acpi_device_get_power(struct acpi_device *device, int *state) /* * The power resources settings may indicate a power state - * shallower than the actual power state of the device. + * shallower than the actual power state of the device, because + * the same power resources may be referenced by other devices. * - * Moreover, on systems predating ACPI 4.0, if the device - * doesn't depend on any power resources and _PSC returns 3, - * that means "power off". We need to maintain compatibility - * with those systems. + * For systems predating ACPI 4.0 we assume that D3hot is the + * deepest state that can be supported. */ if (psc > result && psc < ACPI_STATE_D3_COLD) result = psc; else if (result == ACPI_STATE_UNKNOWN) - result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc; + result = psc > ACPI_STATE_D2 ? 
ACPI_STATE_D3_HOT : psc; } /* @@ -153,8 +152,8 @@ static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state) */ int acpi_device_set_power(struct acpi_device *device, int state) { + int target_state = state; int result = 0; - bool cut_power = false; if (!device || !device->flags.power_manageable || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) @@ -169,11 +168,21 @@ int acpi_device_set_power(struct acpi_device *device, int state) return 0; } - if (!device->power.states[state].flags.valid) { + if (state == ACPI_STATE_D3_COLD) { + /* + * For transitions to D3cold we need to execute _PS3 and then + * possibly drop references to the power resources in use. + */ + state = ACPI_STATE_D3_HOT; + /* If _PR3 is not available, use D3hot as the target state. */ + if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) + target_state = state; + } else if (!device->power.states[state].flags.valid) { dev_warn(&device->dev, "Power state %s not supported\n", acpi_power_state_string(state)); return -ENODEV; } + if (!device->power.flags.ignore_parent && device->parent && (state < device->parent->power.state)) { dev_warn(&device->dev, @@ -183,39 +192,38 @@ int acpi_device_set_power(struct acpi_device *device, int state) return -ENODEV; } - /* For D3cold we should first transition into D3hot. */ - if (state == ACPI_STATE_D3_COLD - && device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) { - state = ACPI_STATE_D3_HOT; - cut_power = true; - } - - if (state < device->power.state && state != ACPI_STATE_D0 - && device->power.state >= ACPI_STATE_D3_HOT) { - dev_warn(&device->dev, - "Cannot transition to non-D0 state from D3\n"); - return -ENODEV; - } - /* * Transition Power * ---------------- - * In accordance with the ACPI specification first apply power (via - * power resources) and then evaluate _PSx. + * In accordance with ACPI 6, _PSx is executed before manipulating power + * resources, unless the target state is D0, in which case _PS0 is + * supposed to be executed after turning the power resources on. */ - if (device->power.flags.power_resources) { - result = acpi_power_transition(device, state); + if (state > ACPI_STATE_D0) { + /* + * According to ACPI 6, devices cannot go from lower-power + * (deeper) states to higher-power (shallower) states. + */ + if (state < device->power.state) { + dev_warn(&device->dev, "Cannot transition from %s to %s\n", + acpi_power_state_string(device->power.state), + acpi_power_state_string(state)); + return -ENODEV; + } + + result = acpi_dev_pm_explicit_set(device, state); if (result) goto end; - } - result = acpi_dev_pm_explicit_set(device, state); - if (result) - goto end; - if (cut_power) { - device->power.state = state; - state = ACPI_STATE_D3_COLD; - result = acpi_power_transition(device, state); + if (device->power.flags.power_resources) + result = acpi_power_transition(device, target_state); + } else { + if (device->power.flags.power_resources) { + result = acpi_power_transition(device, ACPI_STATE_D0); + if (result) + goto end; + } + result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); } end: @@ -264,13 +272,24 @@ int acpi_bus_init_power(struct acpi_device *device) return result; if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) { + /* Reference count the power resources. 
*/ result = acpi_power_on_resources(device, state); if (result) return result; - result = acpi_dev_pm_explicit_set(device, state); - if (result) - return result; + if (state == ACPI_STATE_D0) { + /* + * If _PSC is not present and the state inferred from + * power resources appears to be D0, it still may be + * necessary to execute _PS0 at this point, because + * another device using the same power resources may + * have been put into D0 previously and that's why we + * see D0 here. + */ + result = acpi_dev_pm_explicit_set(device, state); + if (result) + return result; + } } else if (state == ACPI_STATE_UNKNOWN) { /* * No power resources and missing _PSC? Cross fingers and make @@ -603,12 +622,12 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3_COLD) return -EINVAL; - if (d_max_in > ACPI_STATE_D3_HOT) { + if (d_max_in > ACPI_STATE_D2) { enum pm_qos_flags_status stat; stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF); if (stat == PM_QOS_FLAGS_ALL) - d_max_in = ACPI_STATE_D3_HOT; + d_max_in = ACPI_STATE_D2; } adev = ACPI_COMPANION(dev); @@ -953,6 +972,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_prepare); */ void acpi_subsys_complete(struct device *dev) { + pm_generic_complete(dev); /* * If the device had been runtime-suspended before the system went into * the sleep state it is going out of and it has never been resumed till diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 5e8fed448850..9d4761d2f6b7 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -59,6 +59,38 @@ #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */ #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */ +/* + * The SCI_EVT clearing timing is not defined by the ACPI specification. + * This leads to lots of practical timing issues for the host EC driver. + * The following variations are defined (from the target EC firmware's + * perspective): + * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the + * target can clear SCI_EVT at any time so long as the host can see + * the indication by reading the status register (EC_SC). So the + * host should re-check SCI_EVT after the first time the SCI_EVT + * indication is seen, which is the same time the query request + * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set + * at any later time could indicate another event. Normally such + * kind of EC firmware has implemented an event queue and will + * return 0x00 to indicate "no outstanding event". + * QUERY: After seeing the query request (QR_EC) written to the command + * register (EC_CMD) by the host and having prepared the responding + * event value in the data register (EC_DATA), the target can safely + * clear SCI_EVT because the target can confirm that the current + * event is being handled by the host. The host then should check + * SCI_EVT right after reading the event response from the data + * register (EC_DATA). + * EVENT: After seeing the event response read from the data register + * (EC_DATA) by the host, the target can clear SCI_EVT. As the + * target requires time to notice the change in the data register + * (EC_DATA), the host may be required to wait additional guarding + * time before checking the SCI_EVT again. Such guarding may not be + * necessary if the host is notified via another IRQ. 
+ */ +#define ACPI_EC_EVT_TIMING_STATUS 0x00 +#define ACPI_EC_EVT_TIMING_QUERY 0x01 +#define ACPI_EC_EVT_TIMING_EVENT 0x02 + /* EC commands */ enum ec_command { ACPI_EC_COMMAND_READ = 0x80, @@ -70,13 +102,13 @@ enum ec_command { #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ -#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ -#define ACPI_EC_UDELAY_POLL 1000 /* Wait 1ms for EC transaction polling */ +#define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */ #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query * when trying to clear the EC */ enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ + EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */ EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and * OpReg are installed */ EC_FLAGS_STARTED, /* Driver is started */ @@ -93,6 +125,16 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); +static bool ec_busy_polling __read_mostly; +module_param(ec_busy_polling, bool, 0644); +MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction"); + +static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL; +module_param(ec_polling_guard, uint, 0644); +MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes"); + +static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY; + /* * If the number of false interrupts per one transaction exceeds * this threshold, will think there is a GPE storm happened and @@ -121,7 +163,6 @@ struct transaction { u8 wlen; u8 rlen; u8 flags; - unsigned long timestamp; }; static int acpi_ec_query(struct acpi_ec *ec, u8 *data); @@ -130,7 +171,6 @@ static void advance_transaction(struct acpi_ec *ec); struct acpi_ec *boot_ec, *first_ec; EXPORT_SYMBOL(first_ec); -static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ @@ -218,7 +258,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec) { u8 x = inb(ec->data_addr); - ec->curr->timestamp = jiffies; + ec->timestamp = jiffies; ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x); return x; } @@ -227,14 +267,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) { ec_dbg_raw("EC_SC(W) = 0x%2.2x", command); outb(command, ec->command_addr); - ec->curr->timestamp = jiffies; + ec->timestamp = jiffies; } static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) { ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data); outb(data, ec->data_addr); - ec->curr->timestamp = jiffies; + ec->timestamp = jiffies; } #ifdef DEBUG @@ -267,7 +307,7 @@ static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec) acpi_event_status gpe_status = 0; (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status); - return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false; + return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? 
true : false; } static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open) @@ -379,19 +419,49 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) static void acpi_ec_submit_query(struct acpi_ec *ec) { if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { - ec_dbg_req("Event started"); + ec_dbg_evt("Command(%s) submitted/blocked", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); + ec->nr_pending_queries++; schedule_work(&ec->work); } } static void acpi_ec_complete_query(struct acpi_ec *ec) { - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { + if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - ec_dbg_req("Event stopped"); + ec_dbg_evt("Command(%s) unblocked", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); } } +static bool acpi_ec_guard_event(struct acpi_ec *ec) +{ + if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || + ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY || + !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) || + (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY)) + return false; + + /* + * Postpone the query submission to allow the firmware to proceed, + * we shouldn't check SCI_EVT before the firmware reflagging it. + */ + return true; +} + +static int ec_transaction_polled(struct acpi_ec *ec) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ec->lock, flags); + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) + ret = 1; + spin_unlock_irqrestore(&ec->lock, flags); + return ret; +} + static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; @@ -404,6 +474,22 @@ static int ec_transaction_completed(struct acpi_ec *ec) return ret; } +static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag) +{ + ec->curr->flags |= flag; + if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { + if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS && + flag == ACPI_EC_COMMAND_POLL) + acpi_ec_complete_query(ec); + if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY && + flag == ACPI_EC_COMMAND_COMPLETE) + acpi_ec_complete_query(ec); + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && + flag == ACPI_EC_COMMAND_COMPLETE) + set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); + } +} + static void advance_transaction(struct acpi_ec *ec) { struct transaction *t; @@ -420,6 +506,18 @@ static void advance_transaction(struct acpi_ec *ec) acpi_ec_clear_gpe(ec); status = acpi_ec_read_status(ec); t = ec->curr; + /* + * Another IRQ or a guarded polling mode advancement is detected, + * the next QR_EC submission is then allowed. 
+ */ + if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) { + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && + (!ec->nr_pending_queries || + test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) { + clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); + acpi_ec_complete_query(ec); + } + } if (!t) goto err; if (t->flags & ACPI_EC_COMMAND_POLL) { @@ -432,17 +530,17 @@ static void advance_transaction(struct acpi_ec *ec) if ((status & ACPI_EC_FLAG_OBF) == 1) { t->rdata[t->ri++] = acpi_ec_read_data(ec); if (t->rlen == t->ri) { - t->flags |= ACPI_EC_COMMAND_COMPLETE; + ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); if (t->command == ACPI_EC_COMMAND_QUERY) - ec_dbg_req("Command(%s) hardware completion", - acpi_ec_cmd_string(t->command)); + ec_dbg_evt("Command(%s) completed by hardware", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); wakeup = true; } } else goto err; } else if (t->wlen == t->wi && (status & ACPI_EC_FLAG_IBF) == 0) { - t->flags |= ACPI_EC_COMMAND_COMPLETE; + ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); wakeup = true; } goto out; @@ -450,17 +548,15 @@ static void advance_transaction(struct acpi_ec *ec) if (EC_FLAGS_QUERY_HANDSHAKE && !(status & ACPI_EC_FLAG_SCI) && (t->command == ACPI_EC_COMMAND_QUERY)) { - t->flags |= ACPI_EC_COMMAND_POLL; - acpi_ec_complete_query(ec); + ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); t->rdata[t->ri++] = 0x00; - t->flags |= ACPI_EC_COMMAND_COMPLETE; - ec_dbg_req("Command(%s) software completion", - acpi_ec_cmd_string(t->command)); + ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); + ec_dbg_evt("Command(%s) completed by software", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); wakeup = true; } else if ((status & ACPI_EC_FLAG_IBF) == 0) { acpi_ec_write_cmd(ec, t->command); - t->flags |= ACPI_EC_COMMAND_POLL; - acpi_ec_complete_query(ec); + ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); } else goto err; goto out; @@ -490,8 +586,39 @@ static void start_transaction(struct acpi_ec *ec) { ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; ec->curr->flags = 0; - ec->curr->timestamp = jiffies; - advance_transaction(ec); +} + +static int ec_guard(struct acpi_ec *ec) +{ + unsigned long guard = usecs_to_jiffies(ec_polling_guard); + unsigned long timeout = ec->timestamp + guard; + + do { + if (ec_busy_polling) { + /* Perform busy polling */ + if (ec_transaction_completed(ec)) + return 0; + udelay(jiffies_to_usecs(guard)); + } else { + /* + * Perform wait polling + * + * For SCI_EVT clearing timing of "event", + * performing guarding before re-checking the + * SCI_EVT. Otherwise, such guarding is not needed + * due to the old practices. 
+ */ + if (!ec_transaction_polled(ec) && + !acpi_ec_guard_event(ec)) + break; + if (wait_event_timeout(ec->wait, + ec_transaction_completed(ec), + guard)) + return 0; + } + /* Guard the register accesses for the polling modes */ + } while (time_before(jiffies, timeout)); + return -ETIME; } static int ec_poll(struct acpi_ec *ec) @@ -502,25 +629,11 @@ static int ec_poll(struct acpi_ec *ec) while (repeat--) { unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); - unsigned long usecs = ACPI_EC_UDELAY_POLL; do { - /* don't sleep with disabled interrupts */ - if (EC_FLAGS_MSI || irqs_disabled()) { - usecs = ACPI_EC_MSI_UDELAY; - udelay(usecs); - if (ec_transaction_completed(ec)) - return 0; - } else { - if (wait_event_timeout(ec->wait, - ec_transaction_completed(ec), - usecs_to_jiffies(usecs))) - return 0; - } + if (!ec_guard(ec)) + return 0; spin_lock_irqsave(&ec->lock, flags); - if (time_after(jiffies, - ec->curr->timestamp + - usecs_to_jiffies(usecs))) - advance_transaction(ec); + advance_transaction(ec); spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); @@ -537,8 +650,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, unsigned long tmp; int ret = 0; - if (EC_FLAGS_MSI) - udelay(ACPI_EC_MSI_UDELAY); /* start transaction */ spin_lock_irqsave(&ec->lock, tmp); /* Enable GPE for command processing (IBF=0/OBF=1) */ @@ -552,7 +663,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command)); start_transaction(ec); spin_unlock_irqrestore(&ec->lock, tmp); + ret = ec_poll(ec); + spin_lock_irqsave(&ec->lock, tmp); if (t->irq_count == ec_storm_threshold) acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM); @@ -575,6 +688,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) return -EINVAL; if (t->rdata) memset(t->rdata, 0, t->rlen); + mutex_lock(&ec->mutex); if (ec->global_lock) { status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); @@ -586,8 +700,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) status = acpi_ec_transaction_unlocked(ec, t); - if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags)) - msleep(1); if (ec->global_lock) acpi_release_global_lock(glk); unlock: @@ -923,11 +1035,54 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) return result; } -static void acpi_ec_gpe_poller(struct work_struct *work) +static void acpi_ec_check_event(struct acpi_ec *ec) { + unsigned long flags; + + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) { + if (ec_guard(ec)) { + spin_lock_irqsave(&ec->lock, flags); + /* + * Take care of the SCI_EVT unless no one else is + * taking care of it. + */ + if (!ec->curr) + advance_transaction(ec); + spin_unlock_irqrestore(&ec->lock, flags); + } + } +} + +static void acpi_ec_event_handler(struct work_struct *work) +{ + unsigned long flags; struct acpi_ec *ec = container_of(work, struct acpi_ec, work); - acpi_ec_query(ec, NULL); + ec_dbg_evt("Event started"); + + spin_lock_irqsave(&ec->lock, flags); + while (ec->nr_pending_queries) { + spin_unlock_irqrestore(&ec->lock, flags); + (void)acpi_ec_query(ec, NULL); + spin_lock_irqsave(&ec->lock, flags); + ec->nr_pending_queries--; + /* + * Before exit, make sure that this work item can be + * scheduled again. There might be QR_EC failures, leaving + * EC_FLAGS_QUERY_PENDING uncleared and preventing this work + * item from being scheduled again. 
+ */ + if (!ec->nr_pending_queries) { + if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || + ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY) + acpi_ec_complete_query(ec); + } + } + spin_unlock_irqrestore(&ec->lock, flags); + + ec_dbg_evt("Event stopped"); + + acpi_ec_check_event(ec); } static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, @@ -961,7 +1116,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; - if (EC_FLAGS_MSI || bits > 8) + if (ec_busy_polling || bits > 8) acpi_ec_burst_enable(ec); for (i = 0; i < bytes; ++i, ++address, ++value) @@ -969,7 +1124,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, acpi_ec_read(ec, address, value) : acpi_ec_write(ec, address, *value); - if (EC_FLAGS_MSI || bits > 8) + if (ec_busy_polling || bits > 8) acpi_ec_burst_disable(ec); switch (result) { @@ -1002,7 +1157,8 @@ static struct acpi_ec *make_acpi_ec(void) init_waitqueue_head(&ec->wait); INIT_LIST_HEAD(&ec->list); spin_lock_init(&ec->lock); - INIT_WORK(&ec->work, acpi_ec_gpe_poller); + INIT_WORK(&ec->work, acpi_ec_event_handler); + ec->timestamp = jiffies; return ec; } @@ -1237,30 +1393,13 @@ static int ec_validate_ecdt(const struct dmi_system_id *id) return 0; } -/* MSI EC needs special treatment, enable it */ -static int ec_flag_msi(const struct dmi_system_id *id) -{ - pr_debug("Detected MSI hardware, enabling workarounds.\n"); - EC_FLAGS_MSI = 1; - EC_FLAGS_VALIDATE_ECDT = 1; - return 0; -} - -/* - * Clevo M720 notebook actually works ok with IRQ mode, if we lifted - * the GPE storm threshold back to 20 - */ -static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) -{ - pr_debug("Setting the EC GPE storm threshold to 20\n"); - ec_storm_threshold = 20; - return 0; -} - +#if 0 /* - * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for - * which case, we complete the QR_EC without issuing it to the firmware. - * https://bugzilla.kernel.org/show_bug.cgi?id=86211 + * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not + * set, for which case, we complete the QR_EC without issuing it to the + * firmware. 
+ * https://bugzilla.kernel.org/show_bug.cgi?id=82611 + * https://bugzilla.kernel.org/show_bug.cgi?id=97381 */ static int ec_flag_query_handshake(const struct dmi_system_id *id) { @@ -1268,6 +1407,7 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id) EC_FLAGS_QUERY_HANDSHAKE = 1; return 0; } +#endif /* * On some hardware it is necessary to clear events accumulated by the EC during @@ -1290,6 +1430,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id) { pr_debug("Detected system needing EC poll on resume.\n"); EC_FLAGS_CLEAR_ON_RESUME = 1; + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; return 0; } @@ -1299,29 +1440,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL}, { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL}, - { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL}, - { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, - { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, - { - ec_flag_msi, "Quanta hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), - DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL}, - { - ec_flag_msi, "Quanta hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), - DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL}, - { - ec_flag_msi, "Clevo W350etq", { - DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."), - DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL}, + ec_validate_ecdt, "MSI MS-171F", { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, @@ -1329,10 +1450,6 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL}, { - ec_enlarge_storm_threshold, "CLEVO hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), - DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, - { ec_skip_dsdt_scan, "HP Folio 13", { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL}, @@ -1343,9 +1460,6 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { { ec_clear_on_resume, "Samsung hardware", { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, - { - ec_flag_query_handshake, "Acer hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL}, {}, }; @@ -1427,6 +1541,43 @@ error: return -ENODEV; } +static int param_set_event_clearing(const char *val, struct kernel_param *kp) +{ + int result = 0; + + if (!strncmp(val, "status", sizeof("status") - 1)) { + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; + pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n"); + } else if (!strncmp(val, "query", sizeof("query") - 1)) { + ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY; + pr_info("Assuming SCI_EVT clearing on QR_EC writes\n"); + } else if (!strncmp(val, "event", sizeof("event") - 1)) { + ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT; + pr_info("Assuming SCI_EVT clearing on event reads\n"); + } else + result = -EINVAL; + return result; +} + +static int param_get_event_clearing(char *buffer, struct kernel_param *kp) +{ + switch (ec_event_clearing) { + case ACPI_EC_EVT_TIMING_STATUS: + return sprintf(buffer, "status"); + case ACPI_EC_EVT_TIMING_QUERY: + return sprintf(buffer, "query"); + case ACPI_EC_EVT_TIMING_EVENT: + return sprintf(buffer, "event"); + 
default: + return sprintf(buffer, "invalid"); + } + return 0; +} + +module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing, + NULL, 0644); +MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing"); + static struct acpi_driver acpi_ec_driver = { .name = "ec", .class = ACPI_EC_CLASS, diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 7a36f02598a6..bea0bbaafa97 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -158,8 +158,9 @@ static int fan_get_state(struct acpi_device *device, unsigned long *state) if (result) return result; - *state = (acpi_state == ACPI_STATE_D3_COLD ? 0 : - (acpi_state == ACPI_STATE_D0 ? 1 : -1)); + *state = acpi_state == ACPI_STATE_D3_COLD + || acpi_state == ACPI_STATE_D3_HOT ? + 0 : (acpi_state == ACPI_STATE_D0 ? 1 : -1); return 0; } diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 39c485b0c25c..b9657af751d1 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/acpi.h> +#include <linux/dma-mapping.h> #include "internal.h" @@ -167,6 +168,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; + bool coherent; if (has_acpi_companion(dev)) { if (acpi_dev) { @@ -223,6 +225,9 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); + if (acpi_check_dma(acpi_dev, &coherent)) + arch_setup_dma_ops(dev, 0, 0, NULL, coherent); + acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index aafe3ca829c2..a322710b5ba4 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -27,7 +27,7 @@ #include <linux/acpi.h> #include <acpi/hed.h> -static struct acpi_device_id acpi_hed_ids[] = { +static const struct acpi_device_id acpi_hed_ids[] = { {"PNP0C33", 0}, {"", 0}, }; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 42f304288829..4683a96932b9 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -139,6 +139,8 @@ struct acpi_ec { struct transaction *curr; spinlock_t lock; struct work_struct work; + unsigned long timestamp; + unsigned long nr_pending_queries; }; extern struct acpi_ec *first_ec; @@ -183,15 +185,10 @@ static inline void suspend_nvs_restore(void) {} #endif /*-------------------------------------------------------------------------- - Video - -------------------------------------------------------------------------- */ -#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) -bool acpi_osi_is_win8(void); -#endif - -/*-------------------------------------------------------------------------- Device properties -------------------------------------------------------------------------- */ +#define ACPI_DT_NAMESPACE_HID "PRP0001" + void acpi_init_properties(struct acpi_device *adev); void acpi_free_properties(struct acpi_device *adev); diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c new file mode 100644 index 000000000000..2161fa178c8d --- /dev/null +++ b/drivers/acpi/nfit.c @@ -0,0 +1,1587 @@ +/* + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include <linux/list_sort.h> +#include <linux/libnvdimm.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/ndctl.h> +#include <linux/list.h> +#include <linux/acpi.h> +#include <linux/sort.h> +#include <linux/io.h> +#include "nfit.h" + +/* + * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is + * irrelevant. + */ +#include <asm-generic/io-64-nonatomic-hi-lo.h> + +static bool force_enable_dimms; +module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); + +static u8 nfit_uuid[NFIT_UUID_MAX][16]; + +const u8 *to_nfit_uuid(enum nfit_uuids id) +{ + return nfit_uuid[id]; +} +EXPORT_SYMBOL(to_nfit_uuid); + +static struct acpi_nfit_desc *to_acpi_nfit_desc( + struct nvdimm_bus_descriptor *nd_desc) +{ + return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); +} + +static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) +{ + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + + /* + * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct + * acpi_device. + */ + if (!nd_desc->provider_name + || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) + return NULL; + + return to_acpi_device(acpi_desc->dev); +} + +static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, unsigned int cmd, void *buf, + unsigned int buf_len) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); + const struct nd_cmd_desc *desc = NULL; + union acpi_object in_obj, in_buf, *out_obj; + struct device *dev = acpi_desc->dev; + const char *cmd_name, *dimm_name; + unsigned long dsm_mask; + acpi_handle handle; + const u8 *uuid; + u32 offset; + int rc, i; + + if (nvdimm) { + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_device *adev = nfit_mem->adev; + + if (!adev) + return -ENOTTY; + dimm_name = nvdimm_name(nvdimm); + cmd_name = nvdimm_cmd_name(cmd); + dsm_mask = nfit_mem->dsm_mask; + desc = nd_cmd_dimm_desc(cmd); + uuid = to_nfit_uuid(NFIT_DEV_DIMM); + handle = adev->handle; + } else { + struct acpi_device *adev = to_acpi_dev(acpi_desc); + + cmd_name = nvdimm_bus_cmd_name(cmd); + dsm_mask = nd_desc->dsm_mask; + desc = nd_cmd_bus_desc(cmd); + uuid = to_nfit_uuid(NFIT_DEV_BUS); + handle = adev->handle; + dimm_name = "bus"; + } + + if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) + return -ENOTTY; + + if (!test_bit(cmd, &dsm_mask)) + return -ENOTTY; + + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_BUFFER; + in_buf.buffer.pointer = buf; + in_buf.buffer.length = 0; + + /* libnvdimm has already validated the input envelope */ + for (i = 0; i < desc->in_num; i++) + in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, + i, buf); + + if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { + dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__, + dimm_name, cmd_name, in_buf.buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, + 4, in_buf.buffer.pointer, min_t(u32, 128, + in_buf.buffer.length), true); + } + + out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj); + if (!out_obj) { + dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, + cmd_name); + 
return -EINVAL; + } + + if (out_obj->package.type != ACPI_TYPE_BUFFER) { + dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", + __func__, dimm_name, cmd_name, out_obj->type); + rc = -EINVAL; + goto out; + } + + if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { + dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, + dimm_name, cmd_name, out_obj->buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, + 4, out_obj->buffer.pointer, min_t(u32, 128, + out_obj->buffer.length), true); + } + + for (i = 0, offset = 0; i < desc->out_num; i++) { + u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, + (u32 *) out_obj->buffer.pointer); + + if (offset + out_size > out_obj->buffer.length) { + dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", + __func__, dimm_name, cmd_name, i); + break; + } + + if (in_buf.buffer.length + offset + out_size > buf_len) { + dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", + __func__, dimm_name, cmd_name, i); + rc = -ENXIO; + goto out; + } + memcpy(buf + in_buf.buffer.length + offset, + out_obj->buffer.pointer + offset, out_size); + offset += out_size; + } + if (offset + in_buf.buffer.length < buf_len) { + if (i >= 1) { + /* + * status valid, return the number of bytes left + * unfilled in the output buffer + */ + rc = buf_len - offset - in_buf.buffer.length; + } else { + dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", + __func__, dimm_name, cmd_name, buf_len, + offset); + rc = -ENXIO; + } + } else + rc = 0; + + out: + ACPI_FREE(out_obj); + + return rc; +} + +static const char *spa_type_name(u16 type) +{ + static const char *to_name[] = { + [NFIT_SPA_VOLATILE] = "volatile", + [NFIT_SPA_PM] = "pmem", + [NFIT_SPA_DCR] = "dimm-control-region", + [NFIT_SPA_BDW] = "block-data-window", + [NFIT_SPA_VDISK] = "volatile-disk", + [NFIT_SPA_VCD] = "volatile-cd", + [NFIT_SPA_PDISK] = "persistent-disk", + [NFIT_SPA_PCD] = "persistent-cd", + + }; + + if (type > NFIT_SPA_PCD) + return "unknown"; + + return to_name[type]; +} + +static int nfit_spa_type(struct acpi_nfit_system_address *spa) +{ + int i; + + for (i = 0; i < NFIT_UUID_MAX; i++) + if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0) + return i; + return -1; +} + +static bool add_spa(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_system_address *spa) +{ + struct device *dev = acpi_desc->dev; + struct nfit_spa *nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), + GFP_KERNEL); + + if (!nfit_spa) + return false; + INIT_LIST_HEAD(&nfit_spa->list); + nfit_spa->spa = spa; + list_add_tail(&nfit_spa->list, &acpi_desc->spas); + dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, + spa->range_index, + spa_type_name(nfit_spa_type(spa))); + return true; +} + +static bool add_memdev(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_memory_map *memdev) +{ + struct device *dev = acpi_desc->dev; + struct nfit_memdev *nfit_memdev = devm_kzalloc(dev, + sizeof(*nfit_memdev), GFP_KERNEL); + + if (!nfit_memdev) + return false; + INIT_LIST_HEAD(&nfit_memdev->list); + nfit_memdev->memdev = memdev; + list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); + dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", + __func__, memdev->device_handle, memdev->range_index, + memdev->region_index); + return true; +} + +static bool add_dcr(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_control_region *dcr) +{ + struct device *dev = acpi_desc->dev; + struct nfit_dcr *nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), + GFP_KERNEL); + + if (!nfit_dcr) + return false; + 
INIT_LIST_HEAD(&nfit_dcr->list); + nfit_dcr->dcr = dcr; + list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); + dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, + dcr->region_index, dcr->windows); + return true; +} + +static bool add_bdw(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_data_region *bdw) +{ + struct device *dev = acpi_desc->dev; + struct nfit_bdw *nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), + GFP_KERNEL); + + if (!nfit_bdw) + return false; + INIT_LIST_HEAD(&nfit_bdw->list); + nfit_bdw->bdw = bdw; + list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); + dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, + bdw->region_index, bdw->windows); + return true; +} + +static bool add_idt(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_interleave *idt) +{ + struct device *dev = acpi_desc->dev; + struct nfit_idt *nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), + GFP_KERNEL); + + if (!nfit_idt) + return false; + INIT_LIST_HEAD(&nfit_idt->list); + nfit_idt->idt = idt; + list_add_tail(&nfit_idt->list, &acpi_desc->idts); + dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, + idt->interleave_index, idt->line_count); + return true; +} + +static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table, + const void *end) +{ + struct device *dev = acpi_desc->dev; + struct acpi_nfit_header *hdr; + void *err = ERR_PTR(-ENOMEM); + + if (table >= end) + return NULL; + + hdr = table; + switch (hdr->type) { + case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: + if (!add_spa(acpi_desc, table)) + return err; + break; + case ACPI_NFIT_TYPE_MEMORY_MAP: + if (!add_memdev(acpi_desc, table)) + return err; + break; + case ACPI_NFIT_TYPE_CONTROL_REGION: + if (!add_dcr(acpi_desc, table)) + return err; + break; + case ACPI_NFIT_TYPE_DATA_REGION: + if (!add_bdw(acpi_desc, table)) + return err; + break; + case ACPI_NFIT_TYPE_INTERLEAVE: + if (!add_idt(acpi_desc, table)) + return err; + break; + case ACPI_NFIT_TYPE_FLUSH_ADDRESS: + dev_dbg(dev, "%s: flush\n", __func__); + break; + case ACPI_NFIT_TYPE_SMBIOS: + dev_dbg(dev, "%s: smbios\n", __func__); + break; + default: + dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); + break; + } + + return table + hdr->length; +} + +static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, + struct nfit_mem *nfit_mem) +{ + u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; + u16 dcr = nfit_mem->dcr->region_index; + struct nfit_spa *nfit_spa; + + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + u16 range_index = nfit_spa->spa->range_index; + int type = nfit_spa_type(nfit_spa->spa); + struct nfit_memdev *nfit_memdev; + + if (type != NFIT_SPA_BDW) + continue; + + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + if (nfit_memdev->memdev->range_index != range_index) + continue; + if (nfit_memdev->memdev->device_handle != device_handle) + continue; + if (nfit_memdev->memdev->region_index != dcr) + continue; + + nfit_mem->spa_bdw = nfit_spa->spa; + return; + } + } + + dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", + nfit_mem->spa_dcr->range_index); + nfit_mem->bdw = NULL; +} + +static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, + struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) +{ + u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; + struct nfit_memdev *nfit_memdev; + struct nfit_dcr *nfit_dcr; + struct nfit_bdw *nfit_bdw; + struct nfit_idt *nfit_idt; + u16 idt_idx, range_index; + + list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { + if 
(nfit_dcr->dcr->region_index != dcr) + continue; + nfit_mem->dcr = nfit_dcr->dcr; + break; + } + + if (!nfit_mem->dcr) { + dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n", + spa->range_index, __to_nfit_memdev(nfit_mem) + ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR"); + return -ENODEV; + } + + /* + * We've found enough to create an nvdimm, optionally + * find an associated BDW + */ + list_add(&nfit_mem->list, &acpi_desc->dimms); + + list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { + if (nfit_bdw->bdw->region_index != dcr) + continue; + nfit_mem->bdw = nfit_bdw->bdw; + break; + } + + if (!nfit_mem->bdw) + return 0; + + nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); + + if (!nfit_mem->spa_bdw) + return 0; + + range_index = nfit_mem->spa_bdw->range_index; + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + if (nfit_memdev->memdev->range_index != range_index || + nfit_memdev->memdev->region_index != dcr) + continue; + nfit_mem->memdev_bdw = nfit_memdev->memdev; + idt_idx = nfit_memdev->memdev->interleave_index; + list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { + if (nfit_idt->idt->interleave_index != idt_idx) + continue; + nfit_mem->idt_bdw = nfit_idt->idt; + break; + } + break; + } + + return 0; +} + +static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_system_address *spa) +{ + struct nfit_mem *nfit_mem, *found; + struct nfit_memdev *nfit_memdev; + int type = nfit_spa_type(spa); + u16 dcr; + + switch (type) { + case NFIT_SPA_DCR: + case NFIT_SPA_PM: + break; + default: + return 0; + } + + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + int rc; + + if (nfit_memdev->memdev->range_index != spa->range_index) + continue; + found = NULL; + dcr = nfit_memdev->memdev->region_index; + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) + if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { + found = nfit_mem; + break; + } + + if (found) + nfit_mem = found; + else { + nfit_mem = devm_kzalloc(acpi_desc->dev, + sizeof(*nfit_mem), GFP_KERNEL); + if (!nfit_mem) + return -ENOMEM; + INIT_LIST_HEAD(&nfit_mem->list); + } + + if (type == NFIT_SPA_DCR) { + struct nfit_idt *nfit_idt; + u16 idt_idx; + + /* multiple dimms may share a SPA when interleaved */ + nfit_mem->spa_dcr = spa; + nfit_mem->memdev_dcr = nfit_memdev->memdev; + idt_idx = nfit_memdev->memdev->interleave_index; + list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { + if (nfit_idt->idt->interleave_index != idt_idx) + continue; + nfit_mem->idt_dcr = nfit_idt->idt; + break; + } + } else { + /* + * A single dimm may belong to multiple SPA-PM + * ranges, record at least one in addition to + * any SPA-DCR range. + */ + nfit_mem->memdev_pmem = nfit_memdev->memdev; + } + + if (found) + continue; + + rc = nfit_mem_add(acpi_desc, nfit_mem, spa); + if (rc) + return rc; + } + + return 0; +} + +static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) +{ + struct nfit_mem *a = container_of(_a, typeof(*a), list); + struct nfit_mem *b = container_of(_b, typeof(*b), list); + u32 handleA, handleB; + + handleA = __to_nfit_memdev(a)->device_handle; + handleB = __to_nfit_memdev(b)->device_handle; + if (handleA < handleB) + return -1; + else if (handleA > handleB) + return 1; + return 0; +} + +static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) +{ + struct nfit_spa *nfit_spa; + + /* + * For each SPA-DCR or SPA-PMEM address range find its + * corresponding MEMDEV(s). From each MEMDEV find the + * corresponding DCR. 
Then, if we're operating on a SPA-DCR, + * try to find a SPA-BDW and a corresponding BDW that references + * the DCR. Throw it all into an nfit_mem object. Note, that + * BDWs are optional. + */ + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + int rc; + + rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); + if (rc) + return rc; + } + + list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); + + return 0; +} + +static ssize_t revision_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision); +} +static DEVICE_ATTR_RO(revision); + +static struct attribute *acpi_nfit_attributes[] = { + &dev_attr_revision.attr, + NULL, +}; + +static struct attribute_group acpi_nfit_attribute_group = { + .name = "nfit", + .attrs = acpi_nfit_attributes, +}; + +const struct attribute_group *acpi_nfit_attribute_groups[] = { + &nvdimm_bus_attribute_group, + &acpi_nfit_attribute_group, + NULL, +}; +EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups); + +static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + + return __to_nfit_memdev(nfit_mem); +} + +static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + + return nfit_mem->dcr; +} + +static ssize_t handle_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); + + return sprintf(buf, "%#x\n", memdev->device_handle); +} +static DEVICE_ATTR_RO(handle); + +static ssize_t phys_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); + + return sprintf(buf, "%#x\n", memdev->physical_id); +} +static DEVICE_ATTR_RO(phys_id); + +static ssize_t vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "%#x\n", dcr->vendor_id); +} +static DEVICE_ATTR_RO(vendor); + +static ssize_t rev_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "%#x\n", dcr->revision_id); +} +static DEVICE_ATTR_RO(rev_id); + +static ssize_t device_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "%#x\n", dcr->device_id); +} +static DEVICE_ATTR_RO(device); + +static ssize_t format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "%#x\n", dcr->code); +} +static DEVICE_ATTR_RO(format); + +static ssize_t serial_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "%#x\n", dcr->serial_number); +} +static DEVICE_ATTR_RO(serial); + +static ssize_t flags_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u16 flags = to_nfit_memdev(dev)->flags; + + return sprintf(buf, "%s%s%s%s%s\n", + flags & ACPI_NFIT_MEM_SAVE_FAILED ? 
"save " : "", + flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "", + flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "", + flags & ACPI_NFIT_MEM_ARMED ? "arm " : "", + flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : ""); +} +static DEVICE_ATTR_RO(flags); + +static struct attribute *acpi_nfit_dimm_attributes[] = { + &dev_attr_handle.attr, + &dev_attr_phys_id.attr, + &dev_attr_vendor.attr, + &dev_attr_device.attr, + &dev_attr_format.attr, + &dev_attr_serial.attr, + &dev_attr_rev_id.attr, + &dev_attr_flags.attr, + NULL, +}; + +static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + + if (to_nfit_dcr(dev)) + return a->mode; + else + return 0; +} + +static struct attribute_group acpi_nfit_dimm_attribute_group = { + .name = "nfit", + .attrs = acpi_nfit_dimm_attributes, + .is_visible = acpi_nfit_dimm_attr_visible, +}; + +static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { + &nvdimm_attribute_group, + &nd_device_attribute_group, + &acpi_nfit_dimm_attribute_group, + NULL, +}; + +static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, + u32 device_handle) +{ + struct nfit_mem *nfit_mem; + + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) + if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) + return nfit_mem->nvdimm; + + return NULL; +} + +static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, + struct nfit_mem *nfit_mem, u32 device_handle) +{ + struct acpi_device *adev, *adev_dimm; + struct device *dev = acpi_desc->dev; + const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM); + unsigned long long sta; + int i, rc = -ENODEV; + acpi_status status; + + nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en; + adev = to_acpi_dev(acpi_desc); + if (!adev) + return 0; + + adev_dimm = acpi_find_child_device(adev, device_handle, false); + nfit_mem->adev = adev_dimm; + if (!adev_dimm) { + dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", + device_handle); + return force_enable_dimms ? 0 : -ENODEV; + } + + status = acpi_evaluate_integer(adev_dimm->handle, "_STA", NULL, &sta); + if (status == AE_NOT_FOUND) { + dev_dbg(dev, "%s missing _STA, assuming enabled...\n", + dev_name(&adev_dimm->dev)); + rc = 0; + } else if (ACPI_FAILURE(status)) + dev_err(dev, "%s failed to retrieve_STA, disabling...\n", + dev_name(&adev_dimm->dev)); + else if ((sta & ACPI_STA_DEVICE_ENABLED) == 0) + dev_info(dev, "%s disabled by firmware\n", + dev_name(&adev_dimm->dev)); + else + rc = 0; + + for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++) + if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) + set_bit(i, &nfit_mem->dsm_mask); + + return force_enable_dimms ? 
0 : rc; +} + +static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) +{ + struct nfit_mem *nfit_mem; + int dimm_count = 0; + + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { + struct nvdimm *nvdimm; + unsigned long flags = 0; + u32 device_handle; + u16 mem_flags; + int rc; + + device_handle = __to_nfit_memdev(nfit_mem)->device_handle; + nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); + if (nvdimm) { + /* + * If for some reason we find multiple DCRs the + * first one wins + */ + dev_err(acpi_desc->dev, "duplicate DCR detected: %s\n", + nvdimm_name(nvdimm)); + continue; + } + + if (nfit_mem->bdw && nfit_mem->memdev_pmem) + flags |= NDD_ALIASING; + + mem_flags = __to_nfit_memdev(nfit_mem)->flags; + if (mem_flags & ACPI_NFIT_MEM_ARMED) + flags |= NDD_UNARMED; + + rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); + if (rc) + continue; + + nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, + acpi_nfit_dimm_attribute_groups, + flags, &nfit_mem->dsm_mask); + if (!nvdimm) + return -ENOMEM; + + nfit_mem->nvdimm = nvdimm; + dimm_count++; + + if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) + continue; + + dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n", + nvdimm_name(nvdimm), + mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "", + mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "", + mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "", + mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : ""); + + } + + return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); +} + +static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) +{ + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS); + struct acpi_device *adev; + int i; + + adev = to_acpi_dev(acpi_desc); + if (!adev) + return; + + for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++) + if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) + set_bit(i, &nd_desc->dsm_mask); +} + +static ssize_t range_index_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nd_region *nd_region = to_nd_region(dev); + struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); + + return sprintf(buf, "%d\n", nfit_spa->spa->range_index); +} +static DEVICE_ATTR_RO(range_index); + +static struct attribute *acpi_nfit_region_attributes[] = { + &dev_attr_range_index.attr, + NULL, +}; + +static struct attribute_group acpi_nfit_region_attribute_group = { + .name = "nfit", + .attrs = acpi_nfit_region_attributes, +}; + +static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { + &nd_region_attribute_group, + &nd_mapping_attribute_group, + &nd_device_attribute_group, + &nd_numa_attribute_group, + &acpi_nfit_region_attribute_group, + NULL, +}; + +/* enough info to uniquely specify an interleave set */ +struct nfit_set_info { + struct nfit_set_info_map { + u64 region_offset; + u32 serial_number; + u32 pad; + } mapping[0]; +}; + +static size_t sizeof_nfit_set_info(int num_mappings) +{ + return sizeof(struct nfit_set_info) + + num_mappings * sizeof(struct nfit_set_info_map); +} + +static int cmp_map(const void *m0, const void *m1) +{ + const struct nfit_set_info_map *map0 = m0; + const struct nfit_set_info_map *map1 = m1; + + return memcmp(&map0->region_offset, &map1->region_offset, + sizeof(u64)); +} + +/* Retrieve the nth entry referencing this spa */ +static struct acpi_nfit_memory_map *memdev_from_spa( + struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) +{ + struct nfit_memdev *nfit_memdev; 
+ + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) + if (nfit_memdev->memdev->range_index == range_index) + if (n-- == 0) + return nfit_memdev->memdev; + return NULL; +} + +static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, + struct nd_region_desc *ndr_desc, + struct acpi_nfit_system_address *spa) +{ + int i, spa_type = nfit_spa_type(spa); + struct device *dev = acpi_desc->dev; + struct nd_interleave_set *nd_set; + u16 nr = ndr_desc->num_mappings; + struct nfit_set_info *info; + + if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE) + /* pass */; + else + return 0; + + nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); + if (!nd_set) + return -ENOMEM; + + info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); + if (!info) + return -ENOMEM; + for (i = 0; i < nr; i++) { + struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; + struct nfit_set_info_map *map = &info->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, + spa->range_index, i); + + if (!memdev || !nfit_mem->dcr) { + dev_err(dev, "%s: failed to find DCR\n", __func__); + return -ENODEV; + } + + map->region_offset = memdev->region_offset; + map->serial_number = nfit_mem->dcr->serial_number; + } + + sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), + cmp_map, NULL); + nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + ndr_desc->nd_set = nd_set; + devm_kfree(dev, info); + + return 0; +} + +static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) +{ + struct acpi_nfit_interleave *idt = mmio->idt; + u32 sub_line_offset, line_index, line_offset; + u64 line_no, table_skip_count, table_offset; + + line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); + table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); + line_offset = idt->line_offset[line_index] + * mmio->line_size; + table_offset = table_skip_count * mmio->table_size; + + return mmio->base_offset + line_offset + table_offset + sub_line_offset; +} + +static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) +{ + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; + u64 offset = nfit_blk->stat_offset + mmio->size * bw; + + if (mmio->num_lines) + offset = to_interleave_offset(offset, mmio); + + return readq(mmio->base + offset); +} + +static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, + resource_size_t dpa, unsigned int len, unsigned int write) +{ + u64 cmd, offset; + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; + + enum { + BCW_OFFSET_MASK = (1ULL << 48)-1, + BCW_LEN_SHIFT = 48, + BCW_LEN_MASK = (1ULL << 8) - 1, + BCW_CMD_SHIFT = 56, + }; + + cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; + len = len >> L1_CACHE_SHIFT; + cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; + cmd |= ((u64) write) << BCW_CMD_SHIFT; + + offset = nfit_blk->cmd_offset + mmio->size * bw; + if (mmio->num_lines) + offset = to_interleave_offset(offset, mmio); + + writeq(cmd, mmio->base + offset); + /* FIXME: conditionally perform read-back if mandated by firmware */ +} + +static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, + resource_size_t dpa, void *iobuf, size_t len, int rw, + unsigned int lane) +{ + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; + unsigned int copied = 0; + u64 base_offset; + int rc; + + base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES + + lane * mmio->size; + /* 
TODO: non-temporal access, flush hints, cache management etc... */ + write_blk_ctl(nfit_blk, lane, dpa, len, rw); + while (len) { + unsigned int c; + u64 offset; + + if (mmio->num_lines) { + u32 line_offset; + + offset = to_interleave_offset(base_offset + copied, + mmio); + div_u64_rem(offset, mmio->line_size, &line_offset); + c = min_t(size_t, len, mmio->line_size - line_offset); + } else { + offset = base_offset + nfit_blk->bdw_offset; + c = len; + } + + if (rw) + memcpy(mmio->aperture + offset, iobuf + copied, c); + else + memcpy(iobuf + copied, mmio->aperture + offset, c); + + copied += c; + len -= c; + } + rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; + return rc; +} + +static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, + resource_size_t dpa, void *iobuf, u64 len, int rw) +{ + struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; + struct nd_region *nd_region = nfit_blk->nd_region; + unsigned int lane, copied = 0; + int rc = 0; + + lane = nd_region_acquire_lane(nd_region); + while (len) { + u64 c = min(len, mmio->size); + + rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, + iobuf + copied, c, rw, lane); + if (rc) + break; + + copied += c; + len -= c; + } + nd_region_release_lane(nd_region, lane); + + return rc; +} + +static void nfit_spa_mapping_release(struct kref *kref) +{ + struct nfit_spa_mapping *spa_map = to_spa_map(kref); + struct acpi_nfit_system_address *spa = spa_map->spa; + struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc; + + WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); + dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index); + iounmap(spa_map->iomem); + release_mem_region(spa->address, spa->length); + list_del(&spa_map->list); + kfree(spa_map); +} + +static struct nfit_spa_mapping *find_spa_mapping( + struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_system_address *spa) +{ + struct nfit_spa_mapping *spa_map; + + WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); + list_for_each_entry(spa_map, &acpi_desc->spa_maps, list) + if (spa_map->spa == spa) + return spa_map; + + return NULL; +} + +static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_system_address *spa) +{ + struct nfit_spa_mapping *spa_map; + + mutex_lock(&acpi_desc->spa_map_mutex); + spa_map = find_spa_mapping(acpi_desc, spa); + + if (spa_map) + kref_put(&spa_map->kref, nfit_spa_mapping_release); + mutex_unlock(&acpi_desc->spa_map_mutex); +} + +static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_system_address *spa) +{ + resource_size_t start = spa->address; + resource_size_t n = spa->length; + struct nfit_spa_mapping *spa_map; + struct resource *res; + + WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); + + spa_map = find_spa_mapping(acpi_desc, spa); + if (spa_map) { + kref_get(&spa_map->kref); + return spa_map->iomem; + } + + spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL); + if (!spa_map) + return NULL; + + INIT_LIST_HEAD(&spa_map->list); + spa_map->spa = spa; + kref_init(&spa_map->kref); + spa_map->acpi_desc = acpi_desc; + + res = request_mem_region(start, n, dev_name(acpi_desc->dev)); + if (!res) + goto err_mem; + + /* TODO: cacheability based on the spa type */ + spa_map->iomem = ioremap_nocache(start, n); + if (!spa_map->iomem) + goto err_map; + + list_add_tail(&spa_map->list, &acpi_desc->spa_maps); + return spa_map->iomem; + + err_map: + release_mem_region(start, n); + err_mem: + kfree(spa_map); + return NULL; +} + +/** + * 
nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges + * @nvdimm_bus: NFIT-bus that provided the spa table entry + * @nfit_spa: spa table to map + * + * In the case where block-data-window apertures and + * dimm-control-regions are interleaved they will end up sharing a + * single request_mem_region() + ioremap() for the address range. In + * the style of devm nfit_spa_map() mappings are automatically dropped + * when all region devices referencing the same mapping are disabled / + * unbound. + */ +static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_system_address *spa) +{ + void __iomem *iomem; + + mutex_lock(&acpi_desc->spa_map_mutex); + iomem = __nfit_spa_map(acpi_desc, spa); + mutex_unlock(&acpi_desc->spa_map_mutex); + + return iomem; +} + +static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, + struct acpi_nfit_interleave *idt, u16 interleave_ways) +{ + if (idt) { + mmio->num_lines = idt->line_count; + mmio->line_size = idt->line_size; + if (interleave_ways == 0) + return -ENXIO; + mmio->table_size = mmio->num_lines * interleave_ways + * mmio->line_size; + } + + return 0; +} + +static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, + struct device *dev) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct nd_blk_region *ndbr = to_nd_blk_region(dev); + struct nfit_blk_mmio *mmio; + struct nfit_blk *nfit_blk; + struct nfit_mem *nfit_mem; + struct nvdimm *nvdimm; + int rc; + + nvdimm = nd_blk_region_to_dimm(ndbr); + nfit_mem = nvdimm_provider_data(nvdimm); + if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { + dev_dbg(dev, "%s: missing%s%s%s\n", __func__, + nfit_mem ? "" : " nfit_mem", + nfit_mem->dcr ? "" : " dcr", + nfit_mem->bdw ? 
"" : " bdw"); + return -ENXIO; + } + + nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); + if (!nfit_blk) + return -ENOMEM; + nd_blk_region_set_provider_data(ndbr, nfit_blk); + nfit_blk->nd_region = to_nd_region(dev); + + /* map block aperture memory */ + nfit_blk->bdw_offset = nfit_mem->bdw->offset; + mmio = &nfit_blk->mmio[BDW]; + mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw); + if (!mmio->base) { + dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, + nvdimm_name(nvdimm)); + return -ENOMEM; + } + mmio->size = nfit_mem->bdw->size; + mmio->base_offset = nfit_mem->memdev_bdw->region_offset; + mmio->idt = nfit_mem->idt_bdw; + mmio->spa = nfit_mem->spa_bdw; + rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, + nfit_mem->memdev_bdw->interleave_ways); + if (rc) { + dev_dbg(dev, "%s: %s failed to init bdw interleave\n", + __func__, nvdimm_name(nvdimm)); + return rc; + } + + /* map block control memory */ + nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; + nfit_blk->stat_offset = nfit_mem->dcr->status_offset; + mmio = &nfit_blk->mmio[DCR]; + mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr); + if (!mmio->base) { + dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, + nvdimm_name(nvdimm)); + return -ENOMEM; + } + mmio->size = nfit_mem->dcr->window_size; + mmio->base_offset = nfit_mem->memdev_dcr->region_offset; + mmio->idt = nfit_mem->idt_dcr; + mmio->spa = nfit_mem->spa_dcr; + rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, + nfit_mem->memdev_dcr->interleave_ways); + if (rc) { + dev_dbg(dev, "%s: %s failed to init dcr interleave\n", + __func__, nvdimm_name(nvdimm)); + return rc; + } + + if (mmio->line_size == 0) + return 0; + + if ((u32) nfit_blk->cmd_offset % mmio->line_size + + 8 > mmio->line_size) { + dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); + return -ENXIO; + } else if ((u32) nfit_blk->stat_offset % mmio->line_size + + 8 > mmio->line_size) { + dev_dbg(dev, "stat_offset crosses interleave boundary\n"); + return -ENXIO; + } + + return 0; +} + +static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, + struct device *dev) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct nd_blk_region *ndbr = to_nd_blk_region(dev); + struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); + int i; + + if (!nfit_blk) + return; /* never enabled */ + + /* auto-free BLK spa mappings */ + for (i = 0; i < 2; i++) { + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i]; + + if (mmio->base) + nfit_spa_unmap(acpi_desc, mmio->spa); + } + nd_blk_region_set_provider_data(ndbr, NULL); + /* devm will free nfit_blk */ +} + +static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, + struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, + struct acpi_nfit_memory_map *memdev, + struct acpi_nfit_system_address *spa) +{ + struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, + memdev->device_handle); + struct nd_blk_region_desc *ndbr_desc; + struct nfit_mem *nfit_mem; + int blk_valid = 0; + + if (!nvdimm) { + dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", + spa->range_index, memdev->device_handle); + return -ENODEV; + } + + nd_mapping->nvdimm = nvdimm; + switch (nfit_spa_type(spa)) { + case NFIT_SPA_PM: + case NFIT_SPA_VOLATILE: + nd_mapping->start = memdev->address; + nd_mapping->size = memdev->region_size; + break; + case NFIT_SPA_DCR: + nfit_mem = nvdimm_provider_data(nvdimm); + if (!nfit_mem || !nfit_mem->bdw) { + 
dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", + spa->range_index, nvdimm_name(nvdimm)); + } else { + nd_mapping->size = nfit_mem->bdw->capacity; + nd_mapping->start = nfit_mem->bdw->start_address; + ndr_desc->num_lanes = nfit_mem->bdw->windows; + blk_valid = 1; + } + + ndr_desc->nd_mapping = nd_mapping; + ndr_desc->num_mappings = blk_valid; + ndbr_desc = to_blk_region_desc(ndr_desc); + ndbr_desc->enable = acpi_nfit_blk_region_enable; + ndbr_desc->disable = acpi_nfit_blk_region_disable; + ndbr_desc->do_io = acpi_desc->blk_do_io; + if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc)) + return -ENOMEM; + break; + } + + return 0; +} + +static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa) +{ + static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; + struct acpi_nfit_system_address *spa = nfit_spa->spa; + struct nd_blk_region_desc ndbr_desc; + struct nd_region_desc *ndr_desc; + struct nfit_memdev *nfit_memdev; + struct nvdimm_bus *nvdimm_bus; + struct resource res; + int count = 0, rc; + + if (spa->range_index == 0) { + dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", + __func__); + return 0; + } + + memset(&res, 0, sizeof(res)); + memset(&nd_mappings, 0, sizeof(nd_mappings)); + memset(&ndbr_desc, 0, sizeof(ndbr_desc)); + res.start = spa->address; + res.end = res.start + spa->length - 1; + ndr_desc = &ndbr_desc.ndr_desc; + ndr_desc->res = &res; + ndr_desc->provider_data = nfit_spa; + ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; + if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) + ndr_desc->numa_node = acpi_map_pxm_to_online_node( + spa->proximity_domain); + else + ndr_desc->numa_node = NUMA_NO_NODE; + + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; + struct nd_mapping *nd_mapping; + + if (memdev->range_index != spa->range_index) + continue; + if (count >= ND_MAX_MAPPINGS) { + dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", + spa->range_index, ND_MAX_MAPPINGS); + return -ENXIO; + } + nd_mapping = &nd_mappings[count++]; + rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, + memdev, spa); + if (rc) + return rc; + } + + ndr_desc->nd_mapping = nd_mappings; + ndr_desc->num_mappings = count; + rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); + if (rc) + return rc; + + nvdimm_bus = acpi_desc->nvdimm_bus; + if (nfit_spa_type(spa) == NFIT_SPA_PM) { + if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc)) + return -ENOMEM; + } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { + if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc)) + return -ENOMEM; + } + return 0; +} + +static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) +{ + struct nfit_spa *nfit_spa; + + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + int rc = acpi_nfit_register_region(acpi_desc, nfit_spa); + + if (rc) + return rc; + } + return 0; +} + +int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) +{ + struct device *dev = acpi_desc->dev; + const void *end; + u8 *data; + int rc; + + INIT_LIST_HEAD(&acpi_desc->spa_maps); + INIT_LIST_HEAD(&acpi_desc->spas); + INIT_LIST_HEAD(&acpi_desc->dcrs); + INIT_LIST_HEAD(&acpi_desc->bdws); + INIT_LIST_HEAD(&acpi_desc->idts); + INIT_LIST_HEAD(&acpi_desc->memdevs); + INIT_LIST_HEAD(&acpi_desc->dimms); + mutex_init(&acpi_desc->spa_map_mutex); + + data = (u8 *) acpi_desc->nfit; + end = data + sz; + data += sizeof(struct acpi_table_nfit); + while (!IS_ERR_OR_NULL(data)) + data 
= add_table(acpi_desc, data, end); + + if (IS_ERR(data)) { + dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, + PTR_ERR(data)); + return PTR_ERR(data); + } + + if (nfit_mem_init(acpi_desc) != 0) + return -ENOMEM; + + acpi_nfit_init_dsms(acpi_desc); + + rc = acpi_nfit_register_dimms(acpi_desc); + if (rc) + return rc; + + return acpi_nfit_register_regions(acpi_desc); +} +EXPORT_SYMBOL_GPL(acpi_nfit_init); + +static int acpi_nfit_add(struct acpi_device *adev) +{ + struct nvdimm_bus_descriptor *nd_desc; + struct acpi_nfit_desc *acpi_desc; + struct device *dev = &adev->dev; + struct acpi_table_header *tbl; + acpi_status status = AE_OK; + acpi_size sz; + int rc; + + status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to find NFIT\n"); + return -ENXIO; + } + + acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); + if (!acpi_desc) + return -ENOMEM; + + dev_set_drvdata(dev, acpi_desc); + acpi_desc->dev = dev; + acpi_desc->nfit = (struct acpi_table_nfit *) tbl; + acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; + nd_desc = &acpi_desc->nd_desc; + nd_desc->provider_name = "ACPI.NFIT"; + nd_desc->ndctl = acpi_nfit_ctl; + nd_desc->attr_groups = acpi_nfit_attribute_groups; + + acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc); + if (!acpi_desc->nvdimm_bus) + return -ENXIO; + + rc = acpi_nfit_init(acpi_desc, sz); + if (rc) { + nvdimm_bus_unregister(acpi_desc->nvdimm_bus); + return rc; + } + return 0; +} + +static int acpi_nfit_remove(struct acpi_device *adev) +{ + struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); + + nvdimm_bus_unregister(acpi_desc->nvdimm_bus); + return 0; +} + +static const struct acpi_device_id acpi_nfit_ids[] = { + { "ACPI0012", 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); + +static struct acpi_driver acpi_nfit_driver = { + .name = KBUILD_MODNAME, + .ids = acpi_nfit_ids, + .ops = { + .add = acpi_nfit_add, + .remove = acpi_nfit_remove, + }, +}; + +static __init int nfit_init(void) +{ + BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); + BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); + BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); + BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); + BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); + BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); + BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); + + acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); + acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); + acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); + acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); + acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); + acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); + acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); + acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); + acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); + acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); + + return acpi_bus_register_driver(&acpi_nfit_driver); +} + +static __exit void nfit_exit(void) +{ + acpi_bus_unregister_driver(&acpi_nfit_driver); +} + +module_init(nfit_init); +module_exit(nfit_exit); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h new file mode 100644 index 000000000000..81f2e8c5a79c --- /dev/null +++ 
b/drivers/acpi/nfit.h @@ -0,0 +1,158 @@ +/* + * NVDIMM Firmware Interface Table - NFIT + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __NFIT_H__ +#define __NFIT_H__ +#include <linux/libnvdimm.h> +#include <linux/types.h> +#include <linux/uuid.h> +#include <linux/acpi.h> +#include <acpi/acuuid.h> + +#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba" +#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66" +#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ + | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ + | ACPI_NFIT_MEM_ARMED) + +enum nfit_uuids { + NFIT_SPA_VOLATILE, + NFIT_SPA_PM, + NFIT_SPA_DCR, + NFIT_SPA_BDW, + NFIT_SPA_VDISK, + NFIT_SPA_VCD, + NFIT_SPA_PDISK, + NFIT_SPA_PCD, + NFIT_DEV_BUS, + NFIT_DEV_DIMM, + NFIT_UUID_MAX, +}; + +struct nfit_spa { + struct acpi_nfit_system_address *spa; + struct list_head list; +}; + +struct nfit_dcr { + struct acpi_nfit_control_region *dcr; + struct list_head list; +}; + +struct nfit_bdw { + struct acpi_nfit_data_region *bdw; + struct list_head list; +}; + +struct nfit_idt { + struct acpi_nfit_interleave *idt; + struct list_head list; +}; + +struct nfit_memdev { + struct acpi_nfit_memory_map *memdev; + struct list_head list; +}; + +/* assembled tables for a given dimm/memory-device */ +struct nfit_mem { + struct nvdimm *nvdimm; + struct acpi_nfit_memory_map *memdev_dcr; + struct acpi_nfit_memory_map *memdev_pmem; + struct acpi_nfit_memory_map *memdev_bdw; + struct acpi_nfit_control_region *dcr; + struct acpi_nfit_data_region *bdw; + struct acpi_nfit_system_address *spa_dcr; + struct acpi_nfit_system_address *spa_bdw; + struct acpi_nfit_interleave *idt_dcr; + struct acpi_nfit_interleave *idt_bdw; + struct list_head list; + struct acpi_device *adev; + unsigned long dsm_mask; +}; + +struct acpi_nfit_desc { + struct nvdimm_bus_descriptor nd_desc; + struct acpi_table_nfit *nfit; + struct mutex spa_map_mutex; + struct list_head spa_maps; + struct list_head memdevs; + struct list_head dimms; + struct list_head spas; + struct list_head dcrs; + struct list_head bdws; + struct list_head idts; + struct nvdimm_bus *nvdimm_bus; + struct device *dev; + unsigned long dimm_dsm_force_en; + int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, + void *iobuf, u64 len, int rw); +}; + +enum nd_blk_mmio_selector { + BDW, + DCR, +}; + +struct nfit_blk { + struct nfit_blk_mmio { + union { + void __iomem *base; + void *aperture; + }; + u64 size; + u64 base_offset; + u32 line_size; + u32 num_lines; + u32 table_size; + struct acpi_nfit_interleave *idt; + struct acpi_nfit_system_address *spa; + } mmio[2]; + struct nd_region *nd_region; + u64 bdw_offset; /* post interleave offset */ + u64 stat_offset; + u64 cmd_offset; +}; + +struct nfit_spa_mapping { + struct acpi_nfit_desc *acpi_desc; + struct acpi_nfit_system_address *spa; + struct list_head list; + struct kref kref; + void __iomem *iomem; +}; + +static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref) +{ + return container_of(kref, struct nfit_spa_mapping, kref); +} + +static 
inline struct acpi_nfit_memory_map *__to_nfit_memdev( + struct nfit_mem *nfit_mem) +{ + if (nfit_mem->memdev_dcr) + return nfit_mem->memdev_dcr; + return nfit_mem->memdev_pmem; +} + +static inline struct acpi_nfit_desc *to_acpi_desc( + struct nvdimm_bus_descriptor *nd_desc) +{ + return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); +} + +const u8 *to_nfit_uuid(enum nfit_uuids id); +int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz); +extern const struct attribute_group *acpi_nfit_attribute_groups[]; +#endif /* __NFIT_H__ */ diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 1333cbdc3ea2..acaa3b4ea504 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -29,6 +29,8 @@ #include <linux/errno.h> #include <linux/acpi.h> #include <linux/numa.h> +#include <linux/nodemask.h> +#include <linux/topology.h> #define PREFIX "ACPI: " @@ -70,7 +72,12 @@ static void __acpi_map_pxm_to_node(int pxm, int node) int acpi_map_pxm_to_node(int pxm) { - int node = pxm_to_node_map[pxm]; + int node; + + if (pxm < 0 || pxm >= MAX_PXM_DOMAINS) + return NUMA_NO_NODE; + + node = pxm_to_node_map[pxm]; if (node == NUMA_NO_NODE) { if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) @@ -83,6 +90,45 @@ int acpi_map_pxm_to_node(int pxm) return node; } +/** + * acpi_map_pxm_to_online_node - Map proximity ID to online node + * @pxm: ACPI proximity ID + * + * This is similar to acpi_map_pxm_to_node(), but always returns an online + * node. When the mapped node from a given proximity ID is offline, it + * looks up the node distance table and returns the nearest online node. + * + * ACPI device drivers, which are called after the NUMA initialization has + * completed in the kernel, can call this interface to obtain their device + * NUMA topology from ACPI tables. Such drivers do not have to deal with + * offline nodes. A node may be offline when a device proximity ID is + * unique, SRAT memory entry does not exist, or NUMA is disabled, ex. + * "numa=off" on x86. 
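+ *
+ * The NFIT driver, for instance, relies on this to assign a NUMA node
+ * to a pmem region whose SRAT proximity domain maps to an offline
+ * node (see acpi_nfit_register_region() in nfit.c above).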
+ */ +int acpi_map_pxm_to_online_node(int pxm) +{ + int node, n, dist, min_dist; + + node = acpi_map_pxm_to_node(pxm); + + if (node == NUMA_NO_NODE) + node = 0; + + if (!node_online(node)) { + min_dist = INT_MAX; + for_each_online_node(n) { + dist = node_distance(node, n); + if (dist < min_dist) { + min_dist = dist; + node = n; + } + } + } + + return node; +} +EXPORT_SYMBOL(acpi_map_pxm_to_online_node); + static void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) { @@ -328,8 +374,6 @@ int acpi_get_node(acpi_handle handle) int pxm; pxm = acpi_get_pxm(handle); - if (pxm < 0 || pxm >= MAX_PXM_DOMAINS) - return NUMA_NO_NODE; return acpi_map_pxm_to_node(pxm); } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 37e592798cd5..c262e4acd68d 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -175,14 +175,10 @@ static void __init acpi_request_region (struct acpi_generic_address *gas, if (!addr || !length) return; - /* Resources are never freed */ - if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) - request_region(addr, length, desc); - else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - request_mem_region(addr, length, desc); + acpi_reserve_region(addr, length, gas->space_id, 0, desc); } -static int __init acpi_reserve_resources(void) +static void __init acpi_reserve_resources(void) { acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, "ACPI PM1a_EVT_BLK"); @@ -211,10 +207,7 @@ static int __init acpi_reserve_resources(void) if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) acpi_request_region(&acpi_gbl_FADT.xgpe1_block, acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); - - return 0; } -device_initcall(acpi_reserve_resources); void acpi_os_printf(const char *fmt, ...) { @@ -556,7 +549,7 @@ static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; acpi_status acpi_os_predefined_override(const struct acpi_predefined_names *init_val, - acpi_string * new_val) + char **new_val) { if (!init_val || !new_val) return AE_BAD_PARAMETER; @@ -1705,6 +1698,12 @@ int acpi_resources_are_enforced(void) } EXPORT_SYMBOL(acpi_resources_are_enforced); +bool acpi_osi_is_win8(void) +{ + return acpi_gbl_osi_data >= ACPI_OSI_WIN_8; +} +EXPORT_SYMBOL(acpi_osi_is_win8); + /* * Deallocate the memory for a spinlock. */ @@ -1863,6 +1862,7 @@ acpi_status __init acpi_os_initialize(void) acpi_status __init acpi_os_initialize1(void) { + acpi_reserve_resources(); kacpid_wq = alloc_workqueue("kacpid", 0, 1); kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index b1def411c0b8..304eccb0ae5c 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -44,7 +44,6 @@ ACPI_MODULE_NAME("pci_irq"); struct acpi_prt_entry { - struct list_head list; struct acpi_pci_id id; u8 pin; acpi_handle link; @@ -163,7 +162,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev, { int segment = pci_domain_nr(dev->bus); int bus = dev->bus->number; - int device = PCI_SLOT(dev->devfn); + int device = pci_ari_enabled(dev->bus) ? 
0 : PCI_SLOT(dev->devfn); struct acpi_prt_entry *entry; if (((prt->address >> 16) & 0xffff) != device || diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index e0bcfb642b52..93eac53b5110 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -684,7 +684,8 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) } } - *state = ACPI_STATE_D3_COLD; + *state = device->power.states[ACPI_STATE_D3_COLD].flags.valid ? + ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT; return 0; } @@ -710,8 +711,6 @@ int acpi_power_transition(struct acpi_device *device, int state) || (device->power.state > ACPI_STATE_D3_COLD)) return -ENODEV; - /* TBD: Resources must be ordered. */ - /* * First we reference all power resources required in the target list * (e.g. so the device doesn't lose power while transitioning). Then, @@ -761,6 +760,25 @@ static void acpi_power_sysfs_remove(struct acpi_device *device) device_remove_file(&device->dev, &dev_attr_resource_in_use); } +static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource) +{ + mutex_lock(&power_resource_list_lock); + + if (!list_empty(&acpi_power_resource_list)) { + struct acpi_power_resource *r; + + list_for_each_entry(r, &acpi_power_resource_list, list_node) + if (r->order > resource->order) { + list_add_tail(&resource->list_node, &r->list_node); + goto out; + } + } + list_add_tail(&resource->list_node, &acpi_power_resource_list); + + out: + mutex_unlock(&power_resource_list_lock); +} + int acpi_add_power_resource(acpi_handle handle) { struct acpi_power_resource *resource; @@ -811,9 +829,7 @@ int acpi_add_power_resource(acpi_handle handle) if (!device_create_file(&device->dev, &dev_attr_resource_in_use)) device->remove = acpi_power_sysfs_remove; - mutex_lock(&power_resource_list_lock); - list_add(&resource->list_node, &acpi_power_resource_list); - mutex_unlock(&power_resource_list_lock); + acpi_power_add_resource_to_list(resource); acpi_device_add_finalize(device); return 0; @@ -844,7 +860,22 @@ void acpi_resume_power_resources(void) && resource->ref_count) { dev_info(&resource->device.dev, "Turning ON\n"); __acpi_power_on(resource); - } else if (state == ACPI_POWER_RESOURCE_STATE_ON + } + + mutex_unlock(&resource->resource_lock); + } + list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) { + int result, state; + + mutex_lock(&resource->resource_lock); + + result = acpi_power_get_state(resource->device.handle, &state); + if (result) { + mutex_unlock(&resource->resource_lock); + continue; + } + + if (state == ACPI_POWER_RESOURCE_STATE_ON && !resource->ref_count) { dev_info(&resource->device.dev, "Turning OFF\n"); __acpi_power_off(resource); diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index b1ec78b8a645..33a38d604630 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -184,7 +184,7 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) phys_cpuid_t phys_id; phys_id = map_mat_entry(handle, type, acpi_id); - if (phys_id == PHYS_CPUID_INVALID) + if (invalid_phys_cpuid(phys_id)) phys_id = map_madt_entry(type, acpi_id); return phys_id; @@ -196,7 +196,7 @@ int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) int i; #endif - if (phys_id == PHYS_CPUID_INVALID) { + if (invalid_phys_cpuid(phys_id)) { /* * On UP processor, there is no _MAT or MADT table. * So above phys_id is always set to PHYS_CPUID_INVALID. 
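The invalid_phys_cpuid() and invalid_logical_cpuid() helpers referenced by these processor hunks are defined outside this diff (in include/linux/acpi.h); a minimal sketch of what they are assumed to look like, with PHYS_CPUID_INVALID as the per-architecture "no such CPU" sentinel:

static inline bool invalid_logical_cpuid(u32 cpuid)
{
	/* logical CPU ids come back as int; any negative value is an error */
	return (int)cpuid < 0;
}

static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
{
	/* PHYS_CPUID_INVALID is the architecture's invalid hardware CPU id */
	return phys_id == PHYS_CPUID_INVALID;
}

This matches the call sites above: acpi_map_cpuid() now returns negative errno values (-EINVAL, -ENODEV) instead of -1, so processor_physically_present() can simply test the result with invalid_logical_cpuid().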
@@ -215,12 +215,12 @@ int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) * Ignores phys_id and always returns 0 for the processor * handle with acpi id 0 if nr_cpu_ids is 1. * This should be the case if SMP tables are not found. - * Return -1 for other CPU's handle. + * Return -EINVAL for other CPU's handle. */ if (nr_cpu_ids <= 1 && acpi_id == 0) return acpi_id; else - return -1; + return -EINVAL; } #ifdef CONFIG_SMP @@ -233,7 +233,7 @@ int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) if (phys_id == 0) return phys_id; #endif - return -1; + return -ENODEV; } int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 39e0c8e36244..d540f42c9232 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -94,7 +94,7 @@ static int set_max_cstate(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id processor_power_dmi_table[] = { +static const struct dmi_system_id processor_power_dmi_table[] = { { set_max_cstate, "Clevo 5600D", { DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index e5dd80800930..7cfbda4d7c51 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -52,10 +52,7 @@ static bool __init processor_physically_present(acpi_handle handle) type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; cpuid = acpi_get_cpuid(handle, type, acpi_id); - if (cpuid == -1) - return false; - - return true; + return !invalid_logical_cpuid(cpuid); } static void acpi_set_pdc_bits(u32 *buf) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 0d083736e25b..7836e2e980f4 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -79,50 +79,51 @@ static bool acpi_properties_format_valid(const union acpi_object *properties) static void acpi_init_of_compatible(struct acpi_device *adev) { const union acpi_object *of_compatible; - struct acpi_hardware_id *hwid; - bool acpi_of = false; int ret; - /* - * Check if the special PRP0001 ACPI ID is present and in that - * case we fill in Device Tree compatible properties for this - * device. - */ - list_for_each_entry(hwid, &adev->pnp.ids, list) { - if (!strcmp(hwid->id, "PRP0001")) { - acpi_of = true; - break; - } - } - - if (!acpi_of) - return; - ret = acpi_dev_get_property_array(adev, "compatible", ACPI_TYPE_STRING, &of_compatible); if (ret) { ret = acpi_dev_get_property(adev, "compatible", ACPI_TYPE_STRING, &of_compatible); if (ret) { - acpi_handle_warn(adev->handle, - "PRP0001 requires compatible property\n"); + if (adev->parent + && adev->parent->flags.of_compatible_ok) + goto out; + return; } } adev->data.of_compatible = of_compatible; + + out: + adev->flags.of_compatible_ok = 1; } void acpi_init_properties(struct acpi_device *adev) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + bool acpi_of = false; + struct acpi_hardware_id *hwid; const union acpi_object *desc; acpi_status status; int i; + /* + * Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in + * Device Tree compatible properties for this device.
+ */ + list_for_each_entry(hwid, &adev->pnp.ids, list) { + if (!strcmp(hwid->id, ACPI_DT_NAMESPACE_HID)) { + acpi_of = true; + break; + } + } + status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL, &buf, ACPI_TYPE_PACKAGE); if (ACPI_FAILURE(status)) - return; + goto out; desc = buf.pointer; if (desc->package.count % 2) @@ -156,13 +157,20 @@ void acpi_init_properties(struct acpi_device *adev) adev->data.pointer = buf.pointer; adev->data.properties = properties; - acpi_init_of_compatible(adev); - return; + if (acpi_of) + acpi_init_of_compatible(adev); + + goto out; } fail: - dev_warn(&adev->dev, "Returned _DSD data is not valid, skipping\n"); + dev_dbg(&adev->dev, "Returned _DSD data is not valid, skipping\n"); ACPI_FREE(buf.pointer); + + out: + if (acpi_of && !adev->flags.of_compatible_ok) + acpi_handle_info(adev->handle, + ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n"); } void acpi_free_properties(struct acpi_device *adev) diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 8244f013f210..10561ce16ed1 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -26,6 +26,7 @@ #include <linux/device.h> #include <linux/export.h> #include <linux/ioport.h> +#include <linux/list.h> #include <linux/slab.h> #ifdef CONFIG_X86 @@ -621,3 +622,164 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares, return (type & types) ? 0 : 1; } EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type); + +struct reserved_region { + struct list_head node; + u64 start; + u64 end; +}; + +static LIST_HEAD(reserved_io_regions); +static LIST_HEAD(reserved_mem_regions); + +static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags, + char *desc) +{ + unsigned int length = end - start + 1; + struct resource *res; + + res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ? + request_region(start, length, desc) : + request_mem_region(start, length, desc); + if (!res) + return -EIO; + + res->flags &= ~flags; + return 0; +} + +static int add_region_before(u64 start, u64 end, u8 space_id, + unsigned long flags, char *desc, + struct list_head *head) +{ + struct reserved_region *reg; + int error; + + reg = kmalloc(sizeof(*reg), GFP_KERNEL); + if (!reg) + return -ENOMEM; + + error = request_range(start, end, space_id, flags, desc); + if (error) { + kfree(reg); + return error; + } + + reg->start = start; + reg->end = end; + list_add_tail(®->node, head); + return 0; +} + +/** + * acpi_reserve_region - Reserve an I/O or memory region as a system resource. + * @start: Starting address of the region. + * @length: Length of the region. + * @space_id: Identifier of address space to reserve the region from. + * @flags: Resource flags to clear for the region after requesting it. + * @desc: Region description (for messages). + * + * Reserve an I/O or memory region as a system resource to prevent others from + * using it. If the new region overlaps with one of the regions (in the given + * address space) already reserved by this routine, only the non-overlapping + * parts of it will be reserved. + * + * Returned is either 0 (success) or a negative error code indicating a resource + * reservation problem. It is the code of the first encountered error, but the + * routine doesn't abort until it has attempted to request all of the parts of + * the new region that don't overlap with other regions reserved previously. + * + * The resources requested by this routine are never released. 
+ */ +int acpi_reserve_region(u64 start, unsigned int length, u8 space_id, + unsigned long flags, char *desc) +{ + struct list_head *regions; + struct reserved_region *reg; + u64 end = start + length - 1; + int ret = 0, error = 0; + + if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) + regions = &reserved_io_regions; + else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + regions = &reserved_mem_regions; + else + return -EINVAL; + + if (list_empty(regions)) + return add_region_before(start, end, space_id, flags, desc, regions); + + list_for_each_entry(reg, regions, node) + if (reg->start == end + 1) { + /* The new region can be prepended to this one. */ + ret = request_range(start, end, space_id, flags, desc); + if (!ret) + reg->start = start; + + return ret; + } else if (reg->start > end) { + /* No overlap. Add the new region here and get out. */ + return add_region_before(start, end, space_id, flags, + desc, ®->node); + } else if (reg->end == start - 1) { + goto combine; + } else if (reg->end >= start) { + goto overlap; + } + + /* The new region goes after the last existing one. */ + return add_region_before(start, end, space_id, flags, desc, regions); + + overlap: + /* + * The new region overlaps an existing one. + * + * The head part of the new region immediately preceding the existing + * overlapping one can be combined with it right away. + */ + if (reg->start > start) { + error = request_range(start, reg->start - 1, space_id, flags, desc); + if (error) + ret = error; + else + reg->start = start; + } + + combine: + /* + * The new region is adjacent to an existing one. If it extends beyond + * that region all the way to the next one, it is possible to combine + * all three of them. + */ + while (reg->end < end) { + struct reserved_region *next = NULL; + u64 a = reg->end + 1, b = end; + + if (!list_is_last(®->node, regions)) { + next = list_next_entry(reg, node); + if (next->start <= end) + b = next->start - 1; + } + error = request_range(a, b, space_id, flags, desc); + if (!error) { + if (next && next->start == b + 1) { + reg->end = next->end; + list_del(&next->node); + kfree(next); + } else { + reg->end = end; + break; + } + } else if (next) { + if (!ret) + ret = error; + + reg = next; + } else { + break; + } + } + + return ret ? ret : error; +} +EXPORT_SYMBOL_GPL(acpi_reserve_region); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 03141aa4ea95..2649a068671d 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -11,6 +11,7 @@ #include <linux/kthread.h> #include <linux/dmi.h> #include <linux/nls.h> +#include <linux/dma-mapping.h> #include <asm/pgtable.h> @@ -135,12 +136,13 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, struct acpi_hardware_id *id; /* - * Since we skip PRP0001 from the modalias below, 0 should be returned - * if PRP0001 is the only ACPI/PNP ID in the device's list. + * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should + * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the + * device's list. 
*/ count = 0; list_for_each_entry(id, &acpi_dev->pnp.ids, list) - if (strcmp(id->id, "PRP0001")) + if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) count++; if (!count) @@ -153,7 +155,7 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, size -= len; list_for_each_entry(id, &acpi_dev->pnp.ids, list) { - if (!strcmp(id->id, "PRP0001")) + if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) continue; count = snprintf(&modalias[len], size, "%s:", id->id); @@ -177,7 +179,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, * @size: Size of the buffer. * * Expose DT compatible modalias as of:NnameTCcompatible. This function should - * only be called for devices having PRP0001 in their list of ACPI/PNP IDs. + * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of + * ACPI/PNP IDs. */ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, int size) @@ -980,9 +983,9 @@ static void acpi_device_remove_files(struct acpi_device *dev) * @adev: ACPI device object to match. * @of_match_table: List of device IDs to match against. * - * If @dev has an ACPI companion which has the special PRP0001 device ID in its - * list of identifiers and a _DSD object with the "compatible" property, use - * that property to match against the given list of identifiers. + * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of + * identifiers and a _DSD object with the "compatible" property, use that + * property to match against the given list of identifiers. */ static bool acpi_of_match_device(struct acpi_device *adev, const struct of_device_id *of_match_table) @@ -1038,14 +1041,14 @@ static const struct acpi_device_id *__acpi_match_device( return id; /* - * Next, check the special "PRP0001" ID and try to match the + * Next, check ACPI_DT_NAMESPACE_HID and try to match the * "compatible" property if found. * * The id returned by the below is not valid, but the only * caller passing non-NULL of_ids here is only interested in * whether or not the return value is NULL. */ - if (!strcmp("PRP0001", hwid->id) + if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) && acpi_of_match_device(device, of_ids)) return id; } @@ -1671,7 +1674,7 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, static void acpi_wakeup_gpe_init(struct acpi_device *device) { - struct acpi_device_id button_device_ids[] = { + static const struct acpi_device_id button_device_ids[] = { {"PNP0C0C", 0}, {"PNP0C0D", 0}, {"PNP0C0E", 0}, @@ -1766,15 +1769,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) if (acpi_has_method(device->handle, pathname)) ps->flags.explicit_set = 1; - /* - * State is valid if there are means to put the device into it. - * D3hot is only valid if _PR3 present. - */ - if (!list_empty(&ps->resources) - || (ps->flags.explicit_set && state < ACPI_STATE_D3_HOT)) { + /* State is valid if there are means to put the device into it. 
*/ + if (!list_empty(&ps->resources) || ps->flags.explicit_set) ps->flags.valid = 1; - ps->flags.os_accessible = 1; - } ps->power = -1; /* Unknown - driver assigned */ ps->latency = -1; /* Unknown - driver assigned */ @@ -1810,21 +1807,13 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) acpi_bus_init_power_state(device, i); INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); + if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) + device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - /* Set defaults for D0 and D3 states (always valid) */ + /* Set defaults for D0 and D3hot states (always valid) */ device->power.states[ACPI_STATE_D0].flags.valid = 1; device->power.states[ACPI_STATE_D0].power = 100; - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - device->power.states[ACPI_STATE_D3_COLD].power = 0; - - /* Set D3cold's explicit_set flag if _PS3 exists. */ - if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) - device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; - - /* Presence of _PS3 or _PRx means we can put the device into D3 cold */ - if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set || - device->power.flags.power_resources) - device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; + device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; @@ -1947,6 +1936,62 @@ bool acpi_dock_match(acpi_handle handle) return acpi_has_method(handle, "_DCK"); } +static acpi_status +acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, + void **return_value) +{ + long *cap = context; + + if (acpi_has_method(handle, "_BCM") && + acpi_has_method(handle, "_BCL")) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight " + "support\n")); + *cap |= ACPI_VIDEO_BACKLIGHT; + if (!acpi_has_method(handle, "_BQC")) + printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " + "cannot determine initial brightness\n"); + /* We have backlight support, no need to scan further */ + return AE_CTRL_TERMINATE; + } + return 0; +} + +/* Returns true if the ACPI object is a video device which can be + * handled by video.ko. + * The device will get a Linux specific CID added in scan.c to + * identify the device as an ACPI graphics device + * Be aware that the graphics device may not be physically present + * Use acpi_video_get_capabilities() to detect general ACPI video + * capabilities of present cards + */ +long acpi_is_video_device(acpi_handle handle) +{ + long video_caps = 0; + + /* Is this device able to support video switching ? */ + if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS")) + video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; + + /* Is this device able to retrieve a video ROM ? */ + if (acpi_has_method(handle, "_ROM")) + video_caps |= ACPI_VIDEO_ROM_AVAILABLE; + + /* Is this device able to configure which video head to be POSTed ? */ + if (acpi_has_method(handle, "_VPO") && + acpi_has_method(handle, "_GPD") && + acpi_has_method(handle, "_SPD")) + video_caps |= ACPI_VIDEO_DEVICE_POSTING; + + /* Only check for backlight functionality if one of the above hit. 
*/ + if (video_caps) + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL, + &video_caps, NULL); + + return video_caps; +} +EXPORT_SYMBOL(acpi_is_video_device); + const char *acpi_device_hid(struct acpi_device *device) { struct acpi_hardware_id *hid; @@ -2109,6 +2154,39 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp) kfree(pnp->unique_id); } +static void acpi_init_coherency(struct acpi_device *adev) +{ + unsigned long long cca = 0; + acpi_status status; + struct acpi_device *parent = adev->parent; + + if (parent && parent->flags.cca_seen) { + /* + * From ACPI spec, OSPM will ignore _CCA if an ancestor + * already saw one. + */ + adev->flags.cca_seen = 1; + cca = parent->flags.coherent_dma; + } else { + status = acpi_evaluate_integer(adev->handle, "_CCA", + NULL, &cca); + if (ACPI_SUCCESS(status)) + adev->flags.cca_seen = 1; + else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED)) + /* + * If architecture does not specify that _CCA is + * required for DMA-able devices (e.g. x86), + * we default to _CCA=1. + */ + cca = 1; + else + acpi_handle_debug(adev->handle, + "ACPI device is missing _CCA.\n"); + } + + adev->flags.coherent_dma = cca; +} + void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, int type, unsigned long long sta) { @@ -2127,6 +2205,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device->flags.visited = false; device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); + acpi_init_coherency(device); } void acpi_device_add_finalize(struct acpi_device *device) @@ -2405,7 +2484,7 @@ static void acpi_default_enumeration(struct acpi_device *device) } static const struct acpi_device_id generic_device_ids[] = { - {"PRP0001", }, + {ACPI_DT_NAMESPACE_HID, }, {"", }, }; @@ -2413,8 +2492,8 @@ static int acpi_generic_device_attach(struct acpi_device *adev, const struct acpi_device_id *not_used) { /* - * Since PRP0001 is the only ID handled here, the test below can be - * unconditional. + * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test + * below can be unconditional. 
*/ if (adev->data.of_compatible) acpi_default_enumeration(adev); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index d24fa1964eb8..6d4e44ea74ac 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -800,7 +800,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, result = thermal_zone_bind_cooling_device (thermal, trip, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); else result = thermal_zone_unbind_cooling_device @@ -824,7 +825,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, if (bind) result = thermal_zone_bind_cooling_device (thermal, trip, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); else result = thermal_zone_unbind_cooling_device (thermal, trip, cdev); @@ -841,7 +843,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, result = thermal_zone_bind_cooling_device (thermal, THERMAL_TRIPS_NONE, cdev, THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); else result = thermal_zone_unbind_cooling_device (thermal, THERMAL_TRIPS_NONE, diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index cd49a3982b6a..67c548ad3764 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -712,3 +712,18 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs) return false; } EXPORT_SYMBOL(acpi_check_dsm); + +/* + * acpi_backlight= handling, this is done here rather than in video_detect.c + * because __setup cannot be used in modules. + */ +char acpi_video_backlight_string[16]; +EXPORT_SYMBOL(acpi_video_backlight_string); + +static int __init acpi_backlight(char *str) +{ + strlcpy(acpi_video_backlight_string, str, + sizeof(acpi_video_backlight_string)); + return 1; +} +__setup("acpi_backlight=", acpi_backlight); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index c42feb2bacd0..815f75ef2411 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -1,106 +1,61 @@ /* + * Copyright (C) 2015 Red Hat Inc. + * Hans de Goede <hdegoede@redhat.com> * Copyright (C) 2008 SuSE Linux Products GmbH * Thomas Renninger <trenn@suse.de> * * May be copied or modified under the terms of the GNU General Public License * * video_detect.c: - * Provides acpi_is_video_device() for early scanning of ACPI devices in scan.c - * There a Linux specific (Spec does not provide a HID for video devices) is - * assigned - * * After PCI devices are glued with ACPI devices * acpi_get_pci_dev() can be called to identify ACPI graphics * devices for which a real graphics card is plugged in * - * Now acpi_video_get_capabilities() can be called to check which - * capabilities the graphics cards plugged in support. The check for general - * video capabilities will be triggered by the first caller of - * acpi_video_get_capabilities(NULL); which will happen when the first - * backlight switching supporting driver calls: - * acpi_video_backlight_support(); - * * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) * are available, video.ko should be used to handle the device. * * Otherwise vendor specific drivers like thinkpad_acpi, asus-laptop, * sony_acpi,... can take care about backlight brightness.
* - * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) - * this file will not be compiled, acpi_video_get_capabilities() and - * acpi_video_backlight_support() will always return 0 and vendor specific - * drivers always can handle backlight. + * Backlight drivers can use acpi_video_get_backlight_type() to determine + * which driver should handle the backlight. * + * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) + * this file will not be compiled and acpi_video_get_backlight_type() will + * always return acpi_backlight_vendor. */ #include <linux/export.h> #include <linux/acpi.h> +#include <linux/backlight.h> #include <linux/dmi.h> +#include <linux/module.h> #include <linux/pci.h> - -#include "internal.h" +#include <linux/types.h> +#include <acpi/video.h> ACPI_MODULE_NAME("video"); #define _COMPONENT ACPI_VIDEO_COMPONENT -static long acpi_video_support; -static bool acpi_video_caps_checked; +void acpi_video_unregister_backlight(void); -static acpi_status -acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, - void **return_value) -{ - long *cap = context; +static bool backlight_notifier_registered; +static struct notifier_block backlight_nb; - if (acpi_has_method(handle, "_BCM") && - acpi_has_method(handle, "_BCL")) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight " - "support\n")); - *cap |= ACPI_VIDEO_BACKLIGHT; - if (!acpi_has_method(handle, "_BQC")) - printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " - "cannot determine initial brightness\n"); - /* We have backlight support, no need to scan further */ - return AE_CTRL_TERMINATE; - } - return 0; -} +static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef; +static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef; -/* Returns true if the ACPI object is a video device which can be - * handled by video.ko. - * The device will get a Linux specific CID added in scan.c to - * identify the device as an ACPI graphics device - * Be aware that the graphics device may not be physically present - * Use acpi_video_get_capabilities() to detect general ACPI video - * capabilities of present cards - */ -long acpi_is_video_device(acpi_handle handle) +static void acpi_video_parse_cmdline(void) { - long video_caps = 0; - - /* Is this device able to support video switching ? */ - if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS")) - video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; - - /* Is this device able to retrieve a video ROM ? */ - if (acpi_has_method(handle, "_ROM")) - video_caps |= ACPI_VIDEO_ROM_AVAILABLE; - - /* Is this device able to configure which video head to be POSTed ? */ - if (acpi_has_method(handle, "_VPO") && - acpi_has_method(handle, "_GPD") && - acpi_has_method(handle, "_SPD")) - video_caps |= ACPI_VIDEO_DEVICE_POSTING; - - /* Only check for backlight functionality if one of the above hit. 
*/ - if (video_caps) - acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, - ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL, - &video_caps, NULL); - - return video_caps; + if (!strcmp("vendor", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_vendor; + if (!strcmp("video", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_video; + if (!strcmp("native", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_native; + if (!strcmp("none", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_none; } -EXPORT_SYMBOL(acpi_is_video_device); static acpi_status find_video(acpi_handle handle, u32 lvl, void *context, void **rv) @@ -109,7 +64,7 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) struct pci_dev *dev; struct acpi_device *acpi_dev; - const struct acpi_device_id video_ids[] = { + static const struct acpi_device_id video_ids[] = { {ACPI_VIDEO_HID, 0}, {"", 0}, }; @@ -130,11 +85,23 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) * buggy */ static int video_detect_force_vendor(const struct dmi_system_id *d) { - acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; + acpi_backlight_dmi = acpi_backlight_vendor; + return 0; +} + +static int video_detect_force_video(const struct dmi_system_id *d) +{ + acpi_backlight_dmi = acpi_backlight_video; return 0; } -static struct dmi_system_id video_detect_dmi_table[] = { +static int video_detect_force_native(const struct dmi_system_id *d) +{ + acpi_backlight_dmi = acpi_backlight_native; + return 0; +} + +static const struct dmi_system_id video_detect_dmi_table[] = { /* On Samsung X360, the BIOS will set a flag (VDRV) if generic * ACPI backlight device is used. This flag will definitively break * the backlight interface (even the vendor interface) untill next @@ -174,137 +141,209 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), }, }, + + /* + * These models have a working acpi_video backlight control, and using + * native backlight causes a regression where backlight does not work + * when userspace is not handling brightness key events. 
Disable + * native_backlight on these to fix this: + * https://bugzilla.kernel.org/show_bug.cgi?id=81691 + */ + { + .callback = video_detect_force_video, + .ident = "ThinkPad T420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "ThinkPad T520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "ThinkPad X201s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), + }, + }, + + /* The native backlight controls do not work on some older machines */ + { + /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */ + .callback = video_detect_force_video, + .ident = "HP ENVY 15 Notebook", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, + "370R4E/370R4V/370R5E/3570RE/370R5V"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, + "3570R/370R/470R/450R/510R/4450RV"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 730U3E/740U3E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), + }, + }, + { + /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, + "900X3C/900X3D/900X3E/900X4C/900X4D"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ + .callback = video_detect_force_video, + .ident = "Dell XPS15 L521X", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), + }, + }, + + /* Non win8 machines which need native backlight nevertheless */ + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */ + .callback = video_detect_force_native, + .ident = "Lenovo Ideapad Z570", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "102434U"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ + .callback = video_detect_force_native, + .ident = "Apple MacBook Pro 12,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro12,1"), + }, + }, { }, }; +static int acpi_video_backlight_notify(struct notifier_block *nb, + unsigned long val, void *bd) +{ + struct backlight_device *backlight = bd; + + /* A raw bl registering may change video -> native */ + if (backlight->props.type == BACKLIGHT_RAW && + val == BACKLIGHT_REGISTERED && + acpi_video_get_backlight_type() 
!= acpi_backlight_video) + acpi_video_unregister_backlight(); + + return NOTIFY_OK; +} + /* - * Returns the video capabilities of a specific ACPI graphics device + * Determine which type of backlight interface to use on this system. + * First check cmdline, then dmi quirks, then do autodetect. * - * if NULL is passed as argument all ACPI devices are enumerated and - * all graphics capabilities of physically present devices are - * summarized and returned. This is cached and done only once. + * The autodetect order is: + * 1) Is the acpi-video backlight interface supported -> + * no, use a vendor interface + * 2) Is this a win8 "ready" BIOS and do we have a native interface -> + * yes, use a native interface + * 3) Else use the acpi-video interface + * + * Arguably the native on win8 check should be done first, but that would + * be a behavior change, which may cause issues. */ -long acpi_video_get_capabilities(acpi_handle graphics_handle) +enum acpi_backlight_type acpi_video_get_backlight_type(void) { - long caps = 0; - struct acpi_device *tmp_dev; - acpi_status status; - - if (acpi_video_caps_checked && graphics_handle == NULL) - return acpi_video_support; - - if (!graphics_handle) { - /* Only do the global walk through all graphics devices once */ - acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, find_video, NULL, - &caps, NULL); - /* There might be boot param flags set already... */ - acpi_video_support |= caps; - acpi_video_caps_checked = 1; - /* Add blacklists here. Be careful to use the right *DMI* bits - * to still be able to override logic via boot params, e.g.: - * - * if (dmi_name_in_vendors("XY")) { - * acpi_video_support |= - * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; - *} - */ + static DEFINE_MUTEX(init_mutex); + static bool init_done; + static long video_caps; + /* Parse cmdline, dmi and acpi only once */ + mutex_lock(&init_mutex); + if (!init_done) { + acpi_video_parse_cmdline(); dmi_check_system(video_detect_dmi_table); - } else { - status = acpi_bus_get_device(graphics_handle, &tmp_dev); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Invalid device")); - return 0; - } - acpi_walk_namespace(ACPI_TYPE_DEVICE, graphics_handle, + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_video, NULL, - &caps, NULL); + &video_caps, NULL); + backlight_nb.notifier_call = acpi_video_backlight_notify; + backlight_nb.priority = 0; + if (backlight_register_notifier(&backlight_nb) == 0) + backlight_notifier_registered = true; + init_done = true; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "We have 0x%lX video support %s %s\n", - graphics_handle ? caps : acpi_video_support, - graphics_handle ? "on device " : "in general", - graphics_handle ? acpi_device_bid(tmp_dev) : "")); - return caps; -} -EXPORT_SYMBOL(acpi_video_get_capabilities); + mutex_unlock(&init_mutex); -static void acpi_video_caps_check(void) -{ - /* - * We must check whether the ACPI graphics device is physically plugged - * in. Therefore this must be called after binding PCI and ACPI devices - */ - if (!acpi_video_caps_checked) - acpi_video_get_capabilities(NULL); -} + if (acpi_backlight_cmdline != acpi_backlight_undef) + return acpi_backlight_cmdline; -bool acpi_osi_is_win8(void) -{ - return acpi_gbl_osi_data >= ACPI_OSI_WIN_8; -} -EXPORT_SYMBOL(acpi_osi_is_win8); + if (acpi_backlight_dmi != acpi_backlight_undef) + return acpi_backlight_dmi; -/* Promote the vendor interface instead of the generic video module.
- * This function allow DMI blacklists to be implemented by externals - * platform drivers instead of putting a big blacklist in video_detect.c - * After calling this function you will probably want to call - * acpi_video_unregister() to make sure the video module is not loaded - */ -void acpi_video_dmi_promote_vendor(void) -{ - acpi_video_caps_check(); - acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; -} -EXPORT_SYMBOL(acpi_video_dmi_promote_vendor); - -/* To be called when a driver who previously promoted the vendor - * interface */ -void acpi_video_dmi_demote_vendor(void) -{ - acpi_video_caps_check(); - acpi_video_support &= ~ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; -} -EXPORT_SYMBOL(acpi_video_dmi_demote_vendor); - -/* Returns true if video.ko can do backlight switching */ -int acpi_video_backlight_support(void) -{ - acpi_video_caps_check(); - - /* First check for boot param -> highest prio */ - if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR) - return 0; - else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO) - return 1; + if (!(video_caps & ACPI_VIDEO_BACKLIGHT)) + return acpi_backlight_vendor; - /* Then check for DMI blacklist -> second highest prio */ - if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VENDOR) - return 0; - else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VIDEO) - return 1; + if (acpi_osi_is_win8() && backlight_device_registered(BACKLIGHT_RAW)) + return acpi_backlight_native; - /* Then go the default way */ - return acpi_video_support & ACPI_VIDEO_BACKLIGHT; + return acpi_backlight_video; } -EXPORT_SYMBOL(acpi_video_backlight_support); +EXPORT_SYMBOL(acpi_video_get_backlight_type); /* - * Use acpi_backlight=vendor/video to force that backlight switching - * is processed by vendor specific acpi drivers or video.ko driver. + * Set the preferred backlight interface type based on DMI info. + * This function allows DMI blacklists to be implemented by external + * platform drivers instead of putting a big blacklist in video_detect.c */ -static int __init acpi_backlight(char *str) +void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) { - if (str == NULL || *str == '\0') - return 1; - else { - if (!strcmp("vendor", str)) - acpi_video_support |= - ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; - if (!strcmp("video", str)) - acpi_video_support |= - ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO; - } - return 1; + acpi_backlight_dmi = type; + /* Remove acpi-video backlight interface if it is no longer desired */ + if (acpi_video_get_backlight_type() != acpi_backlight_video) + acpi_video_unregister_backlight(); +} +EXPORT_SYMBOL(acpi_video_set_dmi_backlight_type); + +void __exit acpi_video_detect_exit(void) +{ + if (backlight_notifier_registered) + backlight_unregister_notifier(&backlight_nb); } -__setup("acpi_backlight=", acpi_backlight); |
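
Usage note: the video_detect.c changes above replace acpi_video_backlight_support() and the promote/demote helpers with a single acpi_video_get_backlight_type() query. The fragment below is only an illustrative sketch of how a vendor backlight driver might consume it; the module and function names are invented, and it assumes the enum and prototype are exported through <acpi/video.h> as elsewhere in this series.

/* Hypothetical consumer of acpi_video_get_backlight_type(); not part of the patch. */
#include <linux/module.h>
#include <acpi/video.h>

static int __init example_vendor_backlight_init(void)
{
	/*
	 * Back off unless the cmdline (acpi_backlight=), the DMI quirk table
	 * and the win8/native autodetection all point at a vendor interface.
	 */
	if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
		return -ENODEV;

	/* ... register the vendor-specific backlight device here ... */
	return 0;
}
module_init(example_vendor_backlight_init);

MODULE_LICENSE("GPL");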
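
Similarly, acpi_reserve_region() added in resource.c above replaces open-coded request_region()/request_mem_region() calls such as the one dropped from osl.c. A hypothetical caller might look like the sketch below; the address, length and description are made up, and the declaration is assumed to be reachable via <linux/acpi.h>.

/* Hypothetical caller of acpi_reserve_region(); not part of the patch. */
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init example_reserve_fw_window(void)
{
	int ret;

	/* Reserve a made-up 4 KiB firmware window in system memory space. */
	ret = acpi_reserve_region(0xfed40000, 0x1000,
				  ACPI_ADR_SPACE_SYSTEM_MEMORY, 0,
				  "example firmware window");
	if (ret)
		pr_warn("example: region not (fully) reserved: %d\n", ret);

	/* Overlapping parts are skipped; the non-overlapping parts stay reserved. */
	return 0;
}
fs_initcall(example_reserve_fw_window);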