author     Mark Brown <broonie@kernel.org>    2024-06-23 13:14:18 +0100
committer  Mark Brown <broonie@kernel.org>    2024-06-23 13:14:18 +0100
commit     17436001a6bc42c7f55dc547ca5b1a873208d91d (patch)
tree       186b872edc5080b90ef1f8fb88f774d4f242775f /drivers
parent     5d0c35feea339e4a3a9c9e99731e4d49ad5ee329 (diff)
parent     d4a0055fdc22381fa256e345095e88d134e354c5 (diff)
spi: add devm_spi_optimize_message() helper
Merge series from David Lechner <dlechner@baylibre.com>:
In the IIO subsystem, we are finding that it is common to call
spi_optimize_message() during driver probe since the SPI message
doesn't change for the lifetime of the driver. This patch adds a
devm_spi_optimize_message() helper to simplify this common pattern.
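
For readers unfamiliar with the pattern, the sketch below illustrates what such a helper is for. It is only an illustration, not the code merged here: the wrapper body assumes the usual devm_add_action_or_reset() devres pattern, and the probe-side driver (foo_probe(), struct foo_state) is hypothetical rather than taken from this series.

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/*
 * Sketch of how a devm_* wrapper around spi_optimize_message() is typically
 * built: optimize the message once, then register a devres action so the
 * pre-computed state is released automatically when the device is unbound.
 */
static void foo_spi_unoptimize_message(void *msg)
{
	spi_unoptimize_message(msg);
}

static int foo_devm_spi_optimize_message(struct device *dev,
					 struct spi_device *spi,
					 struct spi_message *msg)
{
	int ret;

	ret = spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, foo_spi_unoptimize_message, msg);
}

/* Hypothetical driver state: one pre-built message reused for every read. */
struct foo_state {
	struct spi_message msg;
	struct spi_transfer xfer;
	u8 rx[4];
};

static int foo_probe(struct spi_device *spi)
{
	struct foo_state *st;

	st = devm_kzalloc(&spi->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	st->xfer.rx_buf = st->rx;
	st->xfer.len = sizeof(st->rx);
	spi_message_init_with_transfers(&st->msg, &st->xfer, 1);

	/* Optimize once at probe time; no manual cleanup needed in remove(). */
	return foo_devm_spi_optimize_message(&spi->dev, spi, &st->msg);
}

After probe, st->msg can be submitted repeatedly (for example with spi_sync()) without being re-validated on every transfer, which is the saving the series is after.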
Diffstat (limited to 'drivers')
265 files changed, 2840 insertions(+), 1695 deletions(-)
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 2d4a35e6dd18..09a87fa222c7 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data) dev_name(&adev->dev), event, (u32) ac->state); acpi_notifier_call_chain(adev, event, (u32) ac->state); - kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(ac->charger); } } @@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev) if (acpi_ac_get_state(ac)) return 0; if (old_state != ac->state) - kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(ac->charger); return 0; } diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index ddd072cbc738..2133085deda7 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -191,6 +191,10 @@ void acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, acpi_adr_space_type space_id, u32 function); +void +acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node, + acpi_adr_space_type space_id); + acpi_status acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function); diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 18fdf2bc2d49..dc6004daf624 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[]; /* Local prototypes */ -static void -acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, - acpi_adr_space_type space_id); - static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, u32 level, void *context, void **return_value); @@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle, * ******************************************************************************/ -static void +void acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, acpi_adr_space_type space_id) { diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 3197e6303c5b..624361a5f34d 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id) } ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods) + +/******************************************************************************* + * + * FUNCTION: acpi_execute_orphan_reg_method + * + * PARAMETERS: device - Handle for the device + * space_id - The address space ID + * + * RETURN: Status + * + * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI + * device. This is a _REG method that has no corresponding region + * within the device's scope. + * + ******************************************************************************/ +acpi_status +acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id) +{ + struct acpi_namespace_node *node; + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method); + + /* Parameter validation */ + + if (!device) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_validate_handle(device); + if (node) { + + /* + * If an "orphan" _REG method is present in the device's scope + * for the given address space ID, run it. 
+ */ + + acpi_ev_execute_orphan_reg_method(node, space_id); + } else { + status = AE_BAD_PARAMETER; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method) diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index 9515bcfe5e97..73903a497d73 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -909,7 +909,7 @@ static void __exit einj_exit(void) if (einj_initialized) platform_driver_unregister(&einj_driver); - platform_device_del(einj_dev); + platform_device_unregister(einj_dev); } module_init(einj_init); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index e7793ee9e649..299ec653388c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (ec->busy_polling || bits > 8) acpi_ec_burst_enable(ec); - for (i = 0; i < bytes; ++i, ++address, ++value) + for (i = 0; i < bytes; ++i, ++address, ++value) { result = (function == ACPI_READ) ? acpi_ec_read(ec, address, value) : acpi_ec_write(ec, address, *value); + if (result < 0) + break; + } if (ec->busy_polling || bits > 8) acpi_ec_burst_disable(ec); @@ -1348,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, return AE_NOT_FOUND; case -ETIME: return AE_TIME; - default: + case 0: return AE_OK; + default: + return AE_ERROR; } } @@ -1502,6 +1507,9 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC); + if (scope_handle != ec->handle) + acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC); + set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); } diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 94e3c000df2e..dc8164b182dc 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -610,7 +610,7 @@ static void acpi_sbs_callback(void *context) if (sbs->charger_exists) { acpi_ac_get_present(sbs); if (sbs->charger_present != saved_charger_state) - kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(sbs->charger); } if (sbs->manager_present) { @@ -622,7 +622,7 @@ static void acpi_sbs_callback(void *context) acpi_battery_read(bat); if (saved_battery_state == bat->present) continue; - kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE); + power_supply_changed(bat->bat); } } } diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index d67881b50bca..a0cfc857fb55 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k) { + int temp; + if (temp_deci_k == THERMAL_TEMP_INVALID) return THERMAL_TEMP_INVALID; - return deci_kelvin_to_millicelsius_with_offset(temp_deci_k, + temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k, tz->kelvin_offset); + if (temp <= 0) + return THERMAL_TEMP_INVALID; + + return temp; } static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip) diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 7dca73417e2b..2fe0934dcd64 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -206,16 +206,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s } /* - * AMD systems from Renoir and Lucienne *require* that the NVME controller + * AMD systems from 
Renoir onwards *require* that the NVME controller * is put into D3 over a Modern Standby / suspend-to-idle cycle. * * This is "typically" accomplished using the `StorageD3Enable` * property in the _DSD that is checked via the `acpi_storage_d3` function - * but this property was introduced after many of these systems launched - * and most OEM systems don't have it in their BIOS. + * but some OEM systems still don't have it in their BIOS. * * The Microsoft documentation for StorageD3Enable mentioned that Windows has - * a hardcoded allowlist for D3 support, which was used for these platforms. + * a hardcoded allowlist for D3 support as well as a registry key to override + * the BIOS, which has been used for these cases. * * This allows quirking on Linux in a similar fashion. * @@ -228,19 +228,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s * https://bugzilla.kernel.org/show_bug.cgi?id=216773 * https://bugzilla.kernel.org/show_bug.cgi?id=217003 * 2) On at least one HP system StorageD3Enable is missing on the second NVME - disk in the system. + * disk in the system. + * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only + * NVME device. */ -static const struct x86_cpu_id storage_d3_cpu_ids[] = { - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */ - {} -}; - bool force_storage_d3(void) { - return x86_match_cpu(storage_d3_cpu_ids); + if (!cpu_feature_enabled(X86_FEATURE_ZEN)) + return false; + return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0; } /* diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index cdf29b178ddc..bb4d30d377ae 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1831,11 +1831,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 2 }; - /* set scsi removable (RMB) bit per ata bit, or if the - * AHCI port says it's external (Hotplug-capable, eSATA). + /* + * Set the SCSI Removable Media Bit (RMB) if the ATA removable media + * device bit (obsolete since ATA-8 ACS) is set. */ - if (ata_id_removable(args->id) || - (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) + if (ata_id_removable(args->id)) hdr[1] |= (1 << 7); if (args->dev->class == ATA_DEV_ZAC) { diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 817838e2f70e..3cb455a32d92 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -915,10 +915,13 @@ static const struct scsi_host_template pata_macio_sht = { .sg_tablesize = MAX_DCMDS, /* We may not need that strict one */ .dma_boundary = ATA_DMA_BOUNDARY, - /* Not sure what the real max is but we know it's less than 64K, let's - * use 64K minus 256 + /* + * The SCSI core requires the segment size to cover at least a page, so + * for 64K page size kernels this must be at least 64K. However the + * hardware can't handle 64K, so pata_macio_qc_prep() will split large + * requests. 
*/ - .max_segment_size = MAX_DBDMA_SEG, + .max_segment_size = SZ_64K, .device_configure = pata_macio_device_configure, .sdev_groups = ata_common_sdev_groups, .can_queue = ATA_DEF_QUEUE, diff --git a/drivers/base/core.c b/drivers/base/core.c index 131d96c6090b..2b4c0624b704 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2739,8 +2739,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; + /* Synchronize with really_probe() */ + device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); + device_unlock(dev); if (retval) goto out; @@ -2845,15 +2848,6 @@ static void devm_attr_group_remove(struct device *dev, void *res) sysfs_remove_group(&dev->kobj, group); } -static void devm_attr_groups_remove(struct device *dev, void *res) -{ - union device_attr_group_devres *devres = res; - const struct attribute_group **groups = devres->groups; - - dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); - sysfs_remove_groups(&dev->kobj, groups); -} - /** * devm_device_add_group - given a device, create a managed attribute group * @dev: The device to create the group for @@ -2886,42 +2880,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp) } EXPORT_SYMBOL_GPL(devm_device_add_group); -/** - * devm_device_add_groups - create a bunch of managed attribute groups - * @dev: The device to create the group for - * @groups: The attribute groups to create, NULL terminated - * - * This function creates a bunch of managed attribute groups. If an error - * occurs when creating a group, all previously created groups will be - * removed, unwinding everything back to the original state when this - * function was called. It will explicitly warn and error if any of the - * attribute files being created already exist. - * - * Returns 0 on success or error code from sysfs_create_group on failure. 
- */ -int devm_device_add_groups(struct device *dev, - const struct attribute_group **groups) -{ - union device_attr_group_devres *devres; - int error; - - devres = devres_alloc(devm_attr_groups_remove, - sizeof(*devres), GFP_KERNEL); - if (!devres) - return -ENOMEM; - - error = sysfs_create_groups(&dev->kobj, groups); - if (error) { - devres_free(devres); - return error; - } - - devres->groups = groups; - devres_add(dev, devres); - return 0; -} -EXPORT_SYMBOL_GPL(devm_device_add_groups); - static int device_add_attrs(struct device *dev) { const struct class *class = dev->class; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 93780f41646b..1153721bc7c2 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -302,6 +302,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq, return 0; } +static void loop_clear_limits(struct loop_device *lo, int mode) +{ + struct queue_limits lim = queue_limits_start_update(lo->lo_queue); + + if (mode & FALLOC_FL_ZERO_RANGE) + lim.max_write_zeroes_sectors = 0; + + if (mode & FALLOC_FL_PUNCH_HOLE) { + lim.max_hw_discard_sectors = 0; + lim.discard_granularity = 0; + } + + queue_limits_commit_update(lo->lo_queue, &lim); +} + static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, int mode) { @@ -320,6 +335,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq)); if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) return -EIO; + + /* + * We initially configure the limits in a hope that fallocate is + * supported and clear them here if that turns out not to be true. + */ + if (unlikely(ret == -EOPNOTSUPP)) + loop_clear_limits(lo, mode); + return ret; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 22a79a62cc4e..b87aa80a46dd 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -589,10 +589,11 @@ static inline int was_interrupted(int result) } /* - * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns - * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed. + * Returns BLK_STS_RESOURCE if the caller should retry after a delay. + * Returns BLK_STS_IOERR if sending failed. 
*/ -static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) +static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, + int index) { struct request *req = blk_mq_rq_from_pdu(cmd); struct nbd_config *config = nbd->config; @@ -614,13 +615,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) type = req_to_nbd_cmd_type(req); if (type == U32_MAX) - return -EIO; + return BLK_STS_IOERR; if (rq_data_dir(req) == WRITE && (config->flags & NBD_FLAG_READ_ONLY)) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Write on read-only\n"); - return -EIO; + return BLK_STS_IOERR; } if (req->cmd_flags & REQ_FUA) @@ -674,11 +675,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) nsock->sent = sent; } set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return (__force int)BLK_STS_RESOURCE; + return BLK_STS_RESOURCE; } dev_err_ratelimited(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); - return -EAGAIN; + goto requeue; } send_pages: if (type != NBD_CMD_WRITE) @@ -715,12 +716,12 @@ send_pages: nsock->pending = req; nsock->sent = sent; set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return (__force int)BLK_STS_RESOURCE; + return BLK_STS_RESOURCE; } dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); - return -EAGAIN; + goto requeue; } /* * The completion might already have come in, @@ -737,7 +738,16 @@ out: trace_nbd_payload_sent(req, handle); nsock->pending = NULL; nsock->sent = 0; - return 0; + __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); + return BLK_STS_OK; + +requeue: + /* retry on a different socket */ + dev_err_ratelimited(disk_to_dev(nbd->disk), + "Request send failed, requeueing\n"); + nbd_mark_nsock_dead(nbd, nsock, 1); + nbd_requeue_cmd(cmd); + return BLK_STS_OK; } static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock, @@ -1018,7 +1028,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index) struct nbd_device *nbd = cmd->nbd; struct nbd_config *config; struct nbd_sock *nsock; - int ret; + blk_status_t ret; lockdep_assert_held(&cmd->lock); @@ -1072,28 +1082,11 @@ again: ret = BLK_STS_OK; goto out; } - /* - * Some failures are related to the link going down, so anything that - * returns EAGAIN can be retried on a different socket. - */ ret = nbd_send_cmd(nbd, cmd, index); - /* - * Access to this flag is protected by cmd->lock, thus it's safe to set - * the flag after nbd_send_cmd() succeed to send request to server. - */ - if (!ret) - __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); - else if (ret == -EAGAIN) { - dev_err_ratelimited(disk_to_dev(nbd->disk), - "Request send failed, requeueing\n"); - nbd_mark_nsock_dead(nbd, nsock, 1); - nbd_requeue_cmd(cmd); - ret = BLK_STS_OK; - } out: mutex_unlock(&nsock->tx_lock); nbd_config_put(nbd); - return ret < 0 ? 
BLK_STS_IOERR : (__force blk_status_t)ret; + return ret; } static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 631dca2e4e84..75f189e42f88 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1824,8 +1824,8 @@ static int null_validate_conf(struct nullb_device *dev) dev->queue_mode = NULL_Q_MQ; } - dev->blocksize = round_down(dev->blocksize, 512); - dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); + if (blk_validate_block_size(dev->blocksize)) + return -EINVAL; if (dev->use_per_node_hctx) { if (dev->submit_queues != nr_online_nodes) diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 6b8b9956ba69..7bb87fa5f7a1 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -28,7 +28,7 @@ #include <linux/tpm_eventlog.h> #ifdef CONFIG_X86 -#include <asm/intel-family.h> +#include <asm/cpu_device_id.h> #endif #define TPM_MINOR 224 /* officially assigned */ diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 176cd8dbf1db..fdef214b9f6b 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); - flush_work(&priv->free_irq_work); + if (priv->free_irq_work.func) + flush_work(&priv->free_irq_work); tpm_tis_clkrun_enable(chip, false); diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 13e99cf65efe..690ad8e9b731 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -210,7 +210,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len, static inline bool is_bsw(void) { #ifdef CONFIG_X86 - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); + return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0; #else return false; #endif diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 6a77d7e201a9..2f83fb97c6fb 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -204,8 +204,15 @@ fail: pr_err("%pV:%s: %s ID is greater than %zu\n", &vaf, con_id, failure, max_size); va_end(ap_copy); - kfree(cla); - return NULL; + + /* + * Don't fail in this case, but as the entry won't ever match just + * fill it with something that also won't match. 
+ */ + strscpy(cla->con_id, "bad", sizeof(cla->con_id)); + strscpy(cla->dev_id, "bad", sizeof(cla->dev_id)); + + return &cla->cl; } static struct clk_lookup * diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c index 25b8e1a80ddc..b32a59fe55e7 100644 --- a/drivers/clk/sifive/sifive-prci.c +++ b/drivers/clk/sifive/sifive-prci.c @@ -4,7 +4,6 @@ * Copyright (C) 2020 Zong Li */ -#include <linux/clkdev.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> @@ -537,13 +536,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd, return r; } - r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev)); - if (r) { - dev_warn(dev, "Failed to register clkdev for %s: %d\n", - init.name, r); - return r; - } - pd->hw_clks.hws[i] = &pic->hw; } diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index f04ae67dda37..fc275d41d51e 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -26,10 +26,11 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/fs.h> -#include <linux/amd-pstate.h> #include <acpi/cppc_acpi.h> +#include "amd-pstate.h" + /* * Abbreviations: * amd_pstate_ut: used as a shortform for AMD P-State unit test. diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 1b7e82a0ad2e..9ad62dbe8bfb 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -36,7 +36,6 @@ #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/static_call.h> -#include <linux/amd-pstate.h> #include <linux/topology.h> #include <acpi/processor.h> @@ -46,6 +45,8 @@ #include <asm/processor.h> #include <asm/cpufeature.h> #include <asm/cpu_device_id.h> + +#include "amd-pstate.h" #include "amd-pstate-trace.h" #define AMD_PSTATE_TRANSITION_LATENCY 20000 @@ -53,6 +54,37 @@ #define CPPC_HIGHEST_PERF_PERFORMANCE 196 #define CPPC_HIGHEST_PERF_DEFAULT 166 +#define AMD_CPPC_EPP_PERFORMANCE 0x00 +#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80 +#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF +#define AMD_CPPC_EPP_POWERSAVE 0xFF + +/* + * enum amd_pstate_mode - driver working mode of amd pstate + */ +enum amd_pstate_mode { + AMD_PSTATE_UNDEFINED = 0, + AMD_PSTATE_DISABLE, + AMD_PSTATE_PASSIVE, + AMD_PSTATE_ACTIVE, + AMD_PSTATE_GUIDED, + AMD_PSTATE_MAX, +}; + +static const char * const amd_pstate_mode_string[] = { + [AMD_PSTATE_UNDEFINED] = "undefined", + [AMD_PSTATE_DISABLE] = "disable", + [AMD_PSTATE_PASSIVE] = "passive", + [AMD_PSTATE_ACTIVE] = "active", + [AMD_PSTATE_GUIDED] = "guided", + NULL, +}; + +struct quirk_entry { + u32 nominal_freq; + u32 lowest_freq; +}; + /* * TODO: We need more time to fine tune processors with shared memory solution * with community together. @@ -669,7 +701,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) if (state) policy->cpuinfo.max_freq = cpudata->max_freq; else - policy->cpuinfo.max_freq = cpudata->nominal_freq; + policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000; policy->max = policy->cpuinfo.max_freq; diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h new file mode 100644 index 000000000000..e6a28e7f4dbf --- /dev/null +++ b/drivers/cpufreq/amd-pstate.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. 
+ * + * Author: Meng Li <li.meng@amd.com> + */ + +#ifndef _LINUX_AMD_PSTATE_H +#define _LINUX_AMD_PSTATE_H + +#include <linux/pm_qos.h> + +/********************************************************************* + * AMD P-state INTERFACE * + *********************************************************************/ +/** + * struct amd_aperf_mperf + * @aperf: actual performance frequency clock count + * @mperf: maximum performance frequency clock count + * @tsc: time stamp counter + */ +struct amd_aperf_mperf { + u64 aperf; + u64 mperf; + u64 tsc; +}; + +/** + * struct amd_cpudata - private CPU data for AMD P-State + * @cpu: CPU number + * @req: constraint request to apply + * @cppc_req_cached: cached performance request hints + * @highest_perf: the maximum performance an individual processor may reach, + * assuming ideal conditions + * For platforms that do not support the preferred core feature, the + * highest_pef may be configured with 166 or 255, to avoid max frequency + * calculated wrongly. we take the fixed value as the highest_perf. + * @nominal_perf: the maximum sustained performance level of the processor, + * assuming ideal operating conditions + * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power + * savings are achieved + * @lowest_perf: the absolute lowest performance level of the processor + * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher + * priority. + * @min_limit_perf: Cached value of the performance corresponding to policy->min + * @max_limit_perf: Cached value of the performance corresponding to policy->max + * @min_limit_freq: Cached value of policy->min (in khz) + * @max_limit_freq: Cached value of policy->max (in khz) + * @max_freq: the frequency (in khz) that mapped to highest_perf + * @min_freq: the frequency (in khz) that mapped to lowest_perf + * @nominal_freq: the frequency (in khz) that mapped to nominal_perf + * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf + * @cur: Difference of Aperf/Mperf/tsc count between last and current sample + * @prev: Last Aperf/Mperf/tsc count value read from register + * @freq: current cpu frequency value (in khz) + * @boost_supported: check whether the Processor or SBIOS supports boost mode + * @hw_prefcore: check whether HW supports preferred core featue. + * Only when hw_prefcore and early prefcore param are true, + * AMD P-State driver supports preferred core featue. + * @epp_policy: Last saved policy used to set energy-performance preference + * @epp_cached: Cached CPPC energy-performance preference value + * @policy: Cpufreq policy value + * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value + * + * The amd_cpudata is key private data for each CPU thread in AMD P-State, and + * represents all the attributes and goals that AMD P-State requests at runtime. 
+ */ +struct amd_cpudata { + int cpu; + + struct freq_qos_request req[2]; + u64 cppc_req_cached; + + u32 highest_perf; + u32 nominal_perf; + u32 lowest_nonlinear_perf; + u32 lowest_perf; + u32 prefcore_ranking; + u32 min_limit_perf; + u32 max_limit_perf; + u32 min_limit_freq; + u32 max_limit_freq; + + u32 max_freq; + u32 min_freq; + u32 nominal_freq; + u32 lowest_nonlinear_freq; + + struct amd_aperf_mperf cur; + struct amd_aperf_mperf prev; + + u64 freq; + bool boost_supported; + bool hw_prefcore; + + /* EPP feature related attributes*/ + s16 epp_policy; + s16 epp_cached; + u32 policy; + u64 cppc_cap1_cached; + bool suspended; +}; + +#endif /* _LINUX_AMD_PSTATE_H */ diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 4b986c044741..15de5e3d96fd 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1153,7 +1153,8 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { - intel_pstate_get_hwp_cap(cpudata); + if (hwp_active) + intel_pstate_get_hwp_cap(cpudata); policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; @@ -1301,12 +1302,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, no_turbo = !!clamp_t(int, input, 0, 1); - if (no_turbo == global.no_turbo) - goto unlock_driver; - - if (global.turbo_disabled) { - pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); + WRITE_ONCE(global.turbo_disabled, turbo_is_disabled()); + if (global.turbo_disabled && !no_turbo) { + pr_notice("Turbo disabled by BIOS or unavailable on processor\n"); count = -EPERM; + if (global.no_turbo) + goto unlock_driver; + else + no_turbo = 1; + } + + if (no_turbo == global.no_turbo) { goto unlock_driver; } @@ -1761,7 +1767,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1926,7 +1932,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; return val; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 00a9f0eef8dd..3c2b6144be23 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2352,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, struct device *dev; int rc; - switch (mode) { - case CXL_DECODER_RAM: - case CXL_DECODER_PMEM: - break; - default: - dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); - return ERR_PTR(-EINVAL); - } - cxlr = cxl_region_alloc(cxlrd, id); if (IS_ERR(cxlr)) return cxlr; @@ -2415,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, { int rc; + switch (mode) { + case CXL_DECODER_RAM: + case CXL_DECODER_PMEM: + break; + default: + dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); + return ERR_PTR(-EINVAL); + } + rc = memregion_alloc(GFP_KERNEL); if (rc < 0) return ERR_PTR(rc); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 1f3520d76861..a17f3c0cdfa6 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -81,7 +81,7 @@ int 
__amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, amd64_warn("%s: error reading F%dx%03x.\n", func, PCI_FUNC(pdev->devfn), offset); - return err; + return pcibios_err_to_errno(err); } int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, @@ -94,7 +94,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, amd64_warn("%s: error writing to F%dx%03x.\n", func, PCI_FUNC(pdev->devfn), offset); - return err; + return pcibios_err_to_errno(err); } /* @@ -1025,8 +1025,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt) } ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp); - if (ret) + if (ret) { + ret = pcibios_err_to_errno(ret); goto out; + } gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp); gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp); diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index cdd8480e7368..dbe9fe5f2ca6 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -800,7 +800,7 @@ static int errcmd_enable_error_reporting(bool enable) rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd); if (rc) - return rc; + return pcibios_err_to_errno(rc); if (enable) errcmd |= ERRCMD_CE | ERRSTS_UE; @@ -809,7 +809,7 @@ static int errcmd_enable_error_reporting(bool enable) rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd); if (rc) - return rc; + return pcibios_err_to_errno(rc); return 0; } diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 869598b20e3a..5268b3f0a25a 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -11,7 +11,7 @@ config FIREWIRE This is the new-generation IEEE 1394 (FireWire) driver stack a.k.a. Juju, a new implementation designed for robustness and simplicity. - See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration + See http://ieee1394.docs.kernel.org/en/latest/migration.html for information about migration from the older Linux 1394 stack to the new driver stack. diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 127d87e3a153..f8b99dd6cd82 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -222,14 +222,14 @@ static int reset_bus(struct fw_card *card, bool short_reset) int reg = short_reset ? 5 : 1; int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; - trace_bus_reset_initiate(card->generation, short_reset); + trace_bus_reset_initiate(card->index, card->generation, short_reset); return card->driver->update_phy_reg(card, reg, 0, bit); } void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) { - trace_bus_reset_schedule(card->generation, short_reset); + trace_bus_reset_schedule(card->index, card->generation, short_reset); /* We don't try hard to sort out requests of long vs. short resets. */ card->br_short = short_reset; @@ -249,7 +249,7 @@ static void br_work(struct work_struct *work) /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. 
*/ if (card->reset_jiffies != 0 && time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) { - trace_bus_reset_postpone(card->generation, card->br_short); + trace_bus_reset_postpone(card->index, card->generation, card->br_short); if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ)) fw_card_put(card); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 55993c9e0b90..9a7dc90330a3 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1559,7 +1559,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, struct client *e_client = e->client; u32 rcode; - trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation, + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation, packet->timestamp); switch (status) { @@ -1659,8 +1659,8 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) memcpy(pp->data, a->data, sizeof(a->data)); } - trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1], - e->p.header[2]); + trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation, + e->p.header[1], e->p.header[2]); card->driver->send_request(card, &e->p); diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 837cc44d8d9f..8107eebd4296 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -508,7 +508,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, struct fw_node *local_node; unsigned long flags; - trace_bus_reset_handle(generation, node_id, bm_abdicate, self_ids, self_id_count); + trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count); spin_lock_irqsave(&card->lock, flags); diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 571fdff65c2b..76ab6a209768 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -174,8 +174,8 @@ static void transmit_complete_callback(struct fw_packet *packet, struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); - trace_async_request_outbound_complete((uintptr_t)t, packet->generation, packet->speed, - status, packet->timestamp); + trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation, + packet->speed, status, packet->timestamp); switch (status) { case ACK_COMPLETE: @@ -398,7 +398,8 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode spin_unlock_irqrestore(&card->lock, flags); - trace_async_request_outbound_initiate((uintptr_t)t, generation, speed, t->packet.header, payload, + trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed, + t->packet.header, payload, tcode_is_read_request(tcode) ? 
0 : length / 4); card->driver->send_request(card, &t->packet); @@ -463,7 +464,7 @@ static DECLARE_COMPLETION(phy_config_done); static void transmit_phy_packet_callback(struct fw_packet *packet, struct fw_card *card, int status) { - trace_async_phy_outbound_complete((uintptr_t)packet, packet->generation, status, + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status, packet->timestamp); complete(&phy_config_done); } @@ -503,7 +504,7 @@ void fw_send_phy_config(struct fw_card *card, phy_config_packet.generation = generation; reinit_completion(&phy_config_done); - trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, + trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index, phy_config_packet.generation, phy_config_packet.header[1], phy_config_packet.header[2]); @@ -674,7 +675,7 @@ static void free_response_callback(struct fw_packet *packet, { struct fw_request *request = container_of(packet, struct fw_request, response); - trace_async_response_outbound_complete((uintptr_t)request, packet->generation, + trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation, packet->speed, status, packet->timestamp); // Decrease the reference count since not at in-flight. @@ -879,9 +880,10 @@ void fw_send_response(struct fw_card *card, // Increase the reference count so that the object is kept during in-flight. fw_request_get(request); - trace_async_response_outbound_initiate((uintptr_t)request, request->response.generation, - request->response.speed, request->response.header, - data, data ? data_length / 4 : 0); + trace_async_response_outbound_initiate((uintptr_t)request, card->index, + request->response.generation, request->response.speed, + request->response.header, data, + data ? data_length / 4 : 0); card->driver->send_response(card, &request->response); } @@ -995,7 +997,7 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) tcode = async_header_get_tcode(p->header); if (tcode_is_link_internal(tcode)) { - trace_async_phy_inbound((uintptr_t)p, p->generation, p->ack, p->timestamp, + trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp, p->header[1], p->header[2]); fw_cdev_handle_phy_packet(card, p); return; @@ -1007,8 +1009,8 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) return; } - trace_async_request_inbound((uintptr_t)request, p->generation, p->speed, p->ack, - p->timestamp, p->header, request->data, + trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed, + p->ack, p->timestamp, p->header, request->data, tcode_is_read_request(tcode) ? 
0 : request->length / 4); offset = async_header_get_offset(p->header); @@ -1078,8 +1080,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) } spin_unlock_irqrestore(&card->lock, flags); - trace_async_response_inbound((uintptr_t)t, p->generation, p->speed, p->ack, p->timestamp, - p->header, data, data_length / 4); + trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack, + p->timestamp, p->header, data, data_length / 4); if (!t) { timed_out: diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 5b9dc26e6bcb..552c78f5f059 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -136,7 +136,7 @@ static int efi_pstore_read_func(struct pstore_record *record, &size, record->buf); if (status != EFI_SUCCESS) { kfree(record->buf); - return -EIO; + return efi_status_to_err(status); } /* @@ -189,7 +189,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record) return 0; if (status != EFI_SUCCESS) - return -EIO; + return efi_status_to_err(status); /* skip variables that don't concern us */ if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID)) @@ -227,7 +227,7 @@ static int efi_pstore_write(struct pstore_record *record) record->size, record->psi->buf, true); efivar_unlock(); - return status == EFI_SUCCESS ? 0 : -EIO; + return efi_status_to_err(status); }; static int efi_pstore_erase(struct pstore_record *record) @@ -238,7 +238,7 @@ static int efi_pstore_erase(struct pstore_record *record) PSTORE_EFI_ATTRIBUTES, 0, NULL); if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) - return -EIO; + return efi_status_to_err(status); return 0; } diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c index 684c9354637c..d0ef93551c44 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) unsigned long __weak kernel_entry_address(unsigned long kernel_addr, efi_loaded_image_t *image) { - return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr; + return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr; } efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index ac8c0ef85158..af2c82f7bd90 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -41,6 +41,7 @@ SECTIONS } /DISCARD/ : { + *(.discard .discard.*) *(.modinfo .init.modinfo) } } diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 5d56bc40a79d..708b777857d3 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -213,7 +213,7 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); * Calls the appropriate efi_runtime_service() with the appropriate * arguments. 
*/ -static void efi_call_rts(struct work_struct *work) +static void __nocfi efi_call_rts(struct work_struct *work) { const union efi_rts_args *args = efi_rts_work.args; efi_status_t status = EFI_NOT_FOUND; @@ -435,7 +435,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { @@ -469,7 +469,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space, u64 *remaining_space, u64 *max_variable_size) { @@ -499,10 +499,9 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) return status; } -static void virt_efi_reset_system(int reset_type, - efi_status_t status, - unsigned long data_size, - efi_char16_t *data) +static void __nocfi +virt_efi_reset_system(int reset_type, efi_status_t status, + unsigned long data_size, efi_char16_t *data) { if (down_trylock(&efi_runtime_lock)) { pr_warn("failed to invoke the reset_system() runtime service:\n" diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3dbddec07028..1c28a48915bb 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1576,7 +1576,7 @@ config GPIO_TPS68470 are "output only" GPIOs. config GPIO_TQMX86 - tristate "TQ-Systems QTMX86 GPIO" + tristate "TQ-Systems TQMx86 GPIO" depends on MFD_TQMX86 || COMPILE_TEST depends on HAS_IOPORT_MAP select GPIOLIB_IRQCHIP diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c index 899335da93c7..7e29a2d8de1a 100644 --- a/drivers/gpio/gpio-gw-pld.c +++ b/drivers/gpio/gpio-gw-pld.c @@ -130,5 +130,6 @@ static struct i2c_driver gw_pld_driver = { }; module_i2c_driver(gw_pld_driver); +MODULE_DESCRIPTION("Gateworks I2C PLD GPIO expander"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c index cd9b16dbe1a9..94f6fefc011b 100644 --- a/drivers/gpio/gpio-mc33880.c +++ b/drivers/gpio/gpio-mc33880.c @@ -168,5 +168,6 @@ static void __exit mc33880_exit(void) module_exit(mc33880_exit); MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); +MODULE_DESCRIPTION("MC33880 high-side/low-side switch GPIO driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 53b69abe6787..7c57eaeb0afe 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -438,5 +438,6 @@ static void __exit pcf857x_exit(void) } module_exit(pcf857x_exit); +MODULE_DESCRIPTION("Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Brownell"); diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 9fc1f3dd4190..a211a02d4b4a 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -438,4 +438,5 @@ static struct amba_driver pl061_gpio_driver = { }; module_amba_driver(pl061_gpio_driver); +MODULE_DESCRIPTION("Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index 3a28c1f273c3..f2e7e8754d95 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -6,6 +6,7 @@ * Vadim V.Vlasov <vvlasov@dev.rtsoft.ru> */ +#include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/errno.h> #include 
<linux/gpio/driver.h> @@ -28,16 +29,25 @@ #define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */ #define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */ +#define TQMX86_GPII_NONE 0 #define TQMX86_GPII_FALLING BIT(0) #define TQMX86_GPII_RISING BIT(1) +/* Stored in irq_type as a trigger type, but not actually valid as a register + * value, so the name doesn't use "GPII" + */ +#define TQMX86_INT_BOTH (BIT(0) | BIT(1)) #define TQMX86_GPII_MASK (BIT(0) | BIT(1)) #define TQMX86_GPII_BITS 2 +/* Stored in irq_type with GPII bits */ +#define TQMX86_INT_UNMASKED BIT(2) struct tqmx86_gpio_data { struct gpio_chip chip; void __iomem *io_base; int irq; + /* Lock must be held for accessing output and irq_type fields */ raw_spinlock_t spinlock; + DECLARE_BITMAP(output, TQMX86_NGPIO); u8 irq_type[TQMX86_NGPI]; }; @@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset, { struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); unsigned long flags; - u8 val; raw_spin_lock_irqsave(&gpio->spinlock, flags); - val = tqmx86_gpio_read(gpio, TQMX86_GPIOD); - if (value) - val |= BIT(offset); - else - val &= ~BIT(offset); - tqmx86_gpio_write(gpio, val, TQMX86_GPIOD); + __assign_bit(offset, gpio->output, value); + tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); } @@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip, return GPIO_LINE_DIRECTION_OUT; } +static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset) + __must_hold(&gpio->spinlock) +{ + u8 type = TQMX86_GPII_NONE, gpiic; + + if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) { + type = gpio->irq_type[offset] & TQMX86_GPII_MASK; + + if (type == TQMX86_INT_BOTH) + type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO) + ? 
TQMX86_GPII_FALLING + : TQMX86_GPII_RISING; + } + + gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); + gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS)); + gpiic |= type << (offset * TQMX86_GPII_BITS); + tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); +} + static void tqmx86_gpio_irq_mask(struct irq_data *data) { unsigned int offset = (data->hwirq - TQMX86_NGPO); struct tqmx86_gpio_data *gpio = gpiochip_get_data( irq_data_get_irq_chip_data(data)); unsigned long flags; - u8 gpiic, mask; - - mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~mask; - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); + gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data)); } @@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data) struct tqmx86_gpio_data *gpio = gpiochip_get_data( irq_data_get_irq_chip_data(data)); unsigned long flags; - u8 gpiic, mask; - - mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data)); + raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~mask; - gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS); - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] |= TQMX86_INT_UNMASKED; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); } @@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) unsigned int offset = (data->hwirq - TQMX86_NGPO); unsigned int edge_type = type & IRQF_TRIGGER_MASK; unsigned long flags; - u8 new_type, gpiic; + u8 new_type; switch (edge_type) { case IRQ_TYPE_EDGE_RISING: @@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) new_type = TQMX86_GPII_FALLING; break; case IRQ_TYPE_EDGE_BOTH: - new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING; + new_type = TQMX86_INT_BOTH; break; default: return -EINVAL; /* not supported */ } - gpio->irq_type[offset] = new_type; - raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS)); - gpiic |= new_type << (offset * TQMX86_GPII_BITS); - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] &= ~TQMX86_GPII_MASK; + gpio->irq_type[offset] |= new_type; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); return 0; @@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc) struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); struct irq_chip *irq_chip = irq_desc_get_chip(desc); - unsigned long irq_bits; - int i = 0; + unsigned long irq_bits, flags; + int i; u8 irq_status; chained_irq_enter(irq_chip, desc); @@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc) tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS); irq_bits = irq_status; + + raw_spin_lock_irqsave(&gpio->spinlock, flags); + for_each_set_bit(i, &irq_bits, TQMX86_NGPI) { + /* + * Edge-both triggers are implemented by flipping the edge + * trigger after each interrupt, as the controller only supports + * either rising or falling edge triggers, but not both. 
+ * + * Internally, the TQMx86 GPIO controller has separate status + * registers for rising and falling edge interrupts. GPIIC + * configures which bits from which register are visible in the + * interrupt status register GPIIS and defines what triggers the + * parent IRQ line. Writing to GPIIS always clears both rising + * and falling interrupt flags internally, regardless of the + * currently configured trigger. + * + * In consequence, we can cleanly implement the edge-both + * trigger in software by first clearing the interrupt and then + * setting the new trigger based on the current GPIO input in + * tqmx86_gpio_irq_config() - even if an edge arrives between + * reading the input and setting the trigger, we will have a new + * interrupt pending. + */ + if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH) + tqmx86_gpio_irq_config(gpio, i); + } + raw_spin_unlock_irqrestore(&gpio->spinlock, flags); + for_each_set_bit(i, &irq_bits, TQMX86_NGPI) generic_handle_domain_irq(gpio->chip.irq.domain, i + TQMX86_NGPO); @@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD); + /* + * Reading the previous output state is not possible with TQMx86 hardware. + * Initialize all outputs to 0 to have a defined state that matches the + * shadow register. + */ + tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD); + chip = &gpio->chip; chip->label = "gpio-tqmx86"; chip->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 026444eeb5c6..d0aa277fc3bf 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -450,6 +450,7 @@ config DRM_PRIVACY_SCREEN config DRM_WERROR bool "Compile the drm subsystem with warnings as errors" depends on DRM && EXPERT + depends on !WERROR default n help A kernel build should not cause any compiler warnings, and this diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 67c234bcf89f..3adaa4670103 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -108,6 +108,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, memset(&bp, 0, sizeof(bp)); *obj = NULL; + flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; bp.size = size; bp.byte_align = alignment; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8d8c39be6129..c556c8b653fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -604,8 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; - bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; - bo->tbo.bdev = &adev->mman.bdev; if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA | AMDGPU_GEM_DOMAIN_GDS)) diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h index 2e8e6c9875f6..f83ace2d7ec3 100644 --- a/drivers/gpu/drm/amd/include/pptable.h +++ b/drivers/gpu/drm/amd/include/pptable.h @@ -477,31 +477,30 @@ typedef struct _ATOM_PPLIB_STATE_V2 } ATOM_PPLIB_STATE_V2; typedef struct _StateArray{ - //how many states we have - UCHAR ucNumEntries; - - ATOM_PPLIB_STATE_V2 states[1]; + //how many states we have + UCHAR ucNumEntries; + + ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */; }StateArray; typedef struct _ClockInfoArray{ - //how many clock levels we have - UCHAR 
ucNumEntries; - - //sizeof(ATOM_PPLIB_CLOCK_INFO) - UCHAR ucEntrySize; - - UCHAR clockInfo[]; + //how many clock levels we have + UCHAR ucNumEntries; + + //sizeof(ATOM_PPLIB_CLOCK_INFO) + UCHAR ucEntrySize; + + UCHAR clockInfo[]; }ClockInfoArray; typedef struct _NonClockInfoArray{ + //how many non-clock levels we have. normally should be same as number of states + UCHAR ucNumEntries; + //sizeof(ATOM_PPLIB_NONCLOCK_INFO) + UCHAR ucEntrySize; - //how many non-clock levels we have. normally should be same as number of states - UCHAR ucNumEntries; - //sizeof(ATOM_PPLIB_NONCLOCK_INFO) - UCHAR ucEntrySize; - - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[]; + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries); }NonClockInfoArray; typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record @@ -513,8 +512,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. + ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_Clock_Voltage_Dependency_Table; typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record @@ -529,8 +530,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. + ATOM_PPLIB_Clock_Voltage_Limit_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_Clock_Voltage_Limit_Table; union _ATOM_PPLIB_CAC_Leakage_Record @@ -553,8 +556,10 @@ typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record; typedef struct _ATOM_PPLIB_CAC_Leakage_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. + ATOM_PPLIB_CAC_Leakage_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_CAC_Leakage_Table; typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record @@ -568,8 +573,10 @@ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. 
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_PhaseSheddingLimits_Table; typedef struct _VCEClockInfo{ @@ -580,8 +587,8 @@ typedef struct _VCEClockInfo{ }VCEClockInfo; typedef struct _VCEClockInfoArray{ - UCHAR ucNumEntries; - VCEClockInfo entries[1]; + UCHAR ucNumEntries; + VCEClockInfo entries[] __counted_by(ucNumEntries); }VCEClockInfoArray; typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record @@ -592,8 +599,8 @@ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table { - UCHAR numEntries; - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_VCE_State_Record @@ -604,8 +611,8 @@ typedef struct _ATOM_PPLIB_VCE_State_Record typedef struct _ATOM_PPLIB_VCE_State_Table { - UCHAR numEntries; - ATOM_PPLIB_VCE_State_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_VCE_State_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_VCE_State_Table; @@ -626,8 +633,8 @@ typedef struct _UVDClockInfo{ }UVDClockInfo; typedef struct _UVDClockInfoArray{ - UCHAR ucNumEntries; - UVDClockInfo entries[1]; + UCHAR ucNumEntries; + UVDClockInfo entries[] __counted_by(ucNumEntries); }UVDClockInfoArray; typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record @@ -638,8 +645,8 @@ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table { - UCHAR numEntries; - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_UVD_Table @@ -657,8 +664,8 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record }ATOM_PPLIB_SAMClk_Voltage_Limit_Record; typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{ - UCHAR numEntries; - ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[]; + UCHAR numEntries; + ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_SAMClk_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_SAMU_Table @@ -675,8 +682,8 @@ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record }ATOM_PPLIB_ACPClk_Voltage_Limit_Record; typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{ - UCHAR numEntries; - ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_ACPClk_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_ACP_Table @@ -743,9 +750,9 @@ typedef struct ATOM_PPLIB_VQ_Budgeting_Record{ } ATOM_PPLIB_VQ_Budgeting_Record; typedef struct ATOM_PPLIB_VQ_Budgeting_Table { - UCHAR revid; - UCHAR numEntries; - ATOM_PPLIB_VQ_Budgeting_Record entries[1]; + UCHAR revid; + UCHAR numEntries; + ATOM_PPLIB_VQ_Budgeting_Record entries[] __counted_by(numEntries); } ATOM_PPLIB_VQ_Budgeting_Table; #pragma pack() diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index bc241b593db1..b6257f34a7c6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && adev->in_s4) { - /* Adds a GFX reset as workaround just 
before sending the - * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering - * an invalid state. - */ - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, - SMU_RESET_MODE_2, NULL); - if (ret) - return ret; + if (!en && !adev->in_s0ix) { + if (adev->in_s4) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. + */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + } ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c index d8e449e6ebda..50cb8f7ee6b2 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c @@ -72,11 +72,6 @@ struct gamma_curve_sector { u32 segment_width; }; -struct gamma_curve_segment { - u32 start; - u32 end; -}; - static struct gamma_curve_sector sector_tbl[] = { { 0, 4, 4 }, { 16, 4, 4 }, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index 14ee79becacb..5ba62e637a61 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -12,10 +12,8 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/dma-mapping.h> -#ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> -#endif #include <drm/drm_print.h> @@ -43,7 +41,6 @@ static int komeda_register_show(struct seq_file *sf, void *x) DEFINE_SHOW_ATTRIBUTE(komeda_register); -#ifdef CONFIG_DEBUG_FS static void komeda_debugfs_init(struct komeda_dev *mdev) { if (!debugfs_initialized()) @@ -55,7 +52,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev) debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root, &mdev->err_verbosity); } -#endif static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -265,9 +261,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev) mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS; -#ifdef CONFIG_DEBUG_FS komeda_debugfs_init(mdev); -#endif return mdev; @@ -286,9 +280,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev) sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group); -#ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(mdev->debugfs_root); -#endif if (mdev->aclk) clk_prepare_enable(mdev->aclk); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c index f3e744172673..f4e76b46ca32 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c, u32 avail_scalers; pipe_st = komeda_pipeline_get_state(c->pipeline, state); - if (!pipe_st) + if (IS_ERR_OR_NULL(pipe_st)) return NULL; avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^ diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 32506524d9a2..fe5fb08c9fc4 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation); static void devm_drm_panel_bridge_release(struct device *dev, void *res) { - struct drm_bridge **bridge = res; + struct drm_bridge *bridge = 
*(struct drm_bridge **)res; - drm_panel_bridge_remove(*bridge); + if (!bridge) + return; + + drm_bridge_remove(bridge); } /** diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index aa93129c3397..2166208a961d 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_BOARD_NAME, "NEXT"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO KUN */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "KUN"), + }, + .driver_data = (void *)&lcd1600x2560_rightside_up, }, { /* Chuwi HiBook (CWI514) */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index f48c4343f469..3e6d4c6aa877 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -285,7 +285,6 @@ struct platform_driver dp_driver = { .remove_new = exynos_dp_remove, .driver = { .name = "exynos-dp", - .owner = THIS_MODULE, .pm = pm_ptr(&exynos_dp_pm_ops), .of_match_table = exynos_dp_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index fab135308b70..11a720fef32b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector) struct vidi_context *ctx = ctx_from_connector(connector); struct edid *edid; int edid_len; + int count; /* * the edid data comes from user side and it would be set @@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector) drm_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + count = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return count; } static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index e968824a4c72..1e26cd4f8347 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector) int ret; if (!hdata->ddc_adpt) - return 0; + goto no_edid; edid = drm_get_edid(connector, hdata->ddc_adpt); if (!edid) - return 0; + goto no_edid; hdata->dvi_mode = !connector->display_info.is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", @@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector) kfree(edid); return ret; + +no_edid: + return drm_add_modes_noedid(connector, 640, 480); } static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index b5f605751b0a..de811e2265da 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -952,6 +952,13 @@ static void mtk_drm_remove(struct platform_device *pdev) of_node_put(private->comp_node[i]); } +static void mtk_drm_shutdown(struct platform_device *pdev) +{ + struct mtk_drm_private *private = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(private->drm); +} + static int mtk_drm_sys_prepare(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); @@ -983,6 +990,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = { 
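Both mediatek here and shmob_drm further below gain a shutdown handler so the atomic KMS pipeline is torn down cleanly on reboot or kexec instead of being left scanning out. The shape of the fix is generic for platform DRM drivers; a minimal sketch, using hypothetical foo_drm_* names (not part of either driver) and assuming drvdata carries the drm_device pointer:

/* sketch only: foo_drm_* names are illustrative */
static void foo_drm_shutdown(struct platform_device *pdev)
{
	struct foo_drm_private *priv = platform_get_drvdata(pdev);

	/* disable planes and CRTCs, mirroring what driver removal does */
	drm_atomic_helper_shutdown(priv->drm);
}

The handler is then wired up through the platform driver's .shutdown callback, exactly as the hunks for mediatek-drm and shmob-drm do.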
static struct platform_driver mtk_drm_platform_driver = { .probe = mtk_drm_probe, .remove_new = mtk_drm_remove, + .shutdown = mtk_drm_shutdown, .driver = { .name = "mediatek-drm", .pm = &mtk_drm_pm_ops, diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 13705c5f1497..4b7497a8755c 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend) if (nv_two_heads(dev)) NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); if (!suspend) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 88728a0b2c25..674dc567e179 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend) nv50_mstm_fini(nouveau_encoder(encoder)); } - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 79cfab53f80e..8c3c1f1e01c5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -43,11 +43,6 @@ #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg) #define LOG_OLD_VALUE(x) -struct init_exec { - bool execute; - bool repeat; -}; - static bool nv_cksum(const uint8_t *data, unsigned int length) { /* diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index aed5d5b51b43..d4725a968827 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); + if (drm->headless) + return; + spin_lock_irq(&drm->hpd_lock); drm->hpd_pending = ~0; spin_unlock_irq(&drm->hpd_lock); @@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) } drm_connector_list_iter_end(&conn_iter); - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); drm_kms_helper_poll_disable(dev); @@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev) /* no display hw */ if (ret == -ENODEV) { ret = 0; + drm->headless = true; goto disp_create_err; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e239c6bf4afa..25fca98a20bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -276,6 +276,7 @@ struct nouveau_drm { /* modesetting */ struct nvbios vbios; struct nouveau_display *display; + bool headless; struct work_struct hpd_work; spinlock_t hpd_lock; u32 hpd_pending; diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index e8f385b9c618..28bfc48a9127 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi) if (ret) return dev_err_probe(dev, ret, "Failed to get backlight\n"); - of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation); + ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation); + if (ret) + return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n"); drm_panel_add(&ctx->panel); diff 
--git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c index e83c3e52251d..0250d5f00bf1 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c @@ -171,6 +171,13 @@ static void shmob_drm_remove(struct platform_device *pdev) drm_kms_helper_poll_fini(ddev); } +static void shmob_drm_shutdown(struct platform_device *pdev) +{ + struct shmob_drm_device *sdev = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(&sdev->ddev); +} + static int shmob_drm_probe(struct platform_device *pdev) { struct shmob_drm_platform_data *pdata = pdev->dev.platform_data; @@ -273,6 +280,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = { static struct platform_driver shmob_drm_platform_driver = { .probe = shmob_drm_probe, .remove_new = shmob_drm_remove, + .shutdown = shmob_drm_shutdown, .driver = { .name = "shmob-drm", .of_match_table = of_match_ptr(shmob_drm_of_table), diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 8f1730aeacc9..823d8d2da17c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -746,7 +746,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, dev->vram_size = pci_resource_len(pdev, 2); drm_info(&dev->drm, - "Register MMIO at 0x%pa size is %llu kiB\n", + "Register MMIO at 0x%pa size is %llu KiB\n", &rmmio_start, (uint64_t)rmmio_size / 1024); dev->rmmio = devm_ioremap(dev->drm.dev, rmmio_start, @@ -765,7 +765,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, fifo_size = pci_resource_len(pdev, 2); drm_info(&dev->drm, - "FIFO at %pa size is %llu kiB\n", + "FIFO at %pa size is %llu KiB\n", &fifo_start, (uint64_t)fifo_size / 1024); dev->fifo_mem = devm_memremap(dev->drm.dev, fifo_start, @@ -790,7 +790,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, * SVGA_REG_VRAM_SIZE. 
*/ drm_info(&dev->drm, - "VRAM at %pa size is %llu kiB\n", + "VRAM at %pa size is %llu KiB\n", &dev->vram_start, (uint64_t)dev->vram_size / 1024); return 0; @@ -960,13 +960,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) vmw_read(dev_priv, SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); - /* - * Workaround for low memory 2D VMs to compensate for the - * allocation taken by fbdev - */ - if (!(dev_priv->capabilities & SVGA_CAP_3D)) - mem_size *= 3; - dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->max_primary_mem = vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM); @@ -991,13 +984,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->max_primary_mem = dev_priv->vram_size; } drm_info(&dev_priv->drm, - "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n", + "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n", (u64)dev_priv->vram_size / 1024, (u64)dev_priv->fifo_mem_size / 1024, dev_priv->memory_size / 1024); drm_info(&dev_priv->drm, - "MOB limits: max mob size = %u kB, max mob pages = %u\n", + "MOB limits: max mob size = %u KiB, max mob pages = %u\n", dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages); ret = vmw_dma_masks(dev_priv); @@ -1015,7 +1008,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) (unsigned)dev_priv->max_gmr_pages); } drm_info(&dev_priv->drm, - "Maximum display memory size is %llu kiB\n", + "Maximum display memory size is %llu KiB\n", (uint64_t)dev_priv->max_primary_mem / 1024); /* Need mmio memory to check for fifo pitchlock cap. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ecaea0026fc..a1ce41e1c468 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1043,9 +1043,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, int vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bpp, unsigned depth); -bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, - uint32_t pitch, - uint32_t height); int vmw_kms_present(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index a0b47c9b33f5..5bd967fbcf55 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, } else new_max_pages = gman->max_gmr_pages * 2; if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) { - DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n", + DRM_WARN("vmwgfx: increasing guest mob limits to %u KiB.\n", ((new_max_pages) << (PAGE_SHIFT - 10))); gman->max_gmr_pages = new_max_pages; } else { char buf[256]; snprintf(buf, sizeof(buf), - "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n", + "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n", ((gman->max_gmr_pages) << (PAGE_SHIFT - 10))); vmw_host_printf(buf); DRM_WARN("%s", buf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 13b2820cae51..00c4ff684130 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -224,7 +224,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, new_image = 
vmw_du_cursor_plane_acquire_image(new_vps); changed = false; - if (old_image && new_image) + if (old_image && new_image && old_image != new_image) changed = memcmp(old_image, new_image, size) != 0; return changed; @@ -2171,13 +2171,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv, return 0; } +static bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, - uint32_t pitch, - uint32_t height) + u64 pitch, + u64 height) { - return ((u64) pitch * (u64) height) < (u64) - ((dev_priv->active_display_unit == vmw_du_screen_target) ? - dev_priv->max_primary_mem : dev_priv->vram_size); + return (pitch * height) < (u64)dev_priv->vram_size; } /** @@ -2873,25 +2872,18 @@ out_unref: enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + enum drm_mode_status ret; struct drm_device *dev = connector->dev; struct vmw_private *dev_priv = vmw_priv(dev); - u32 max_width = dev_priv->texture_max_width; - u32 max_height = dev_priv->texture_max_height; u32 assumed_cpp = 4; if (dev_priv->assume_16bpp) assumed_cpp = 2; - if (dev_priv->active_display_unit == vmw_du_screen_target) { - max_width = min(dev_priv->stdu_max_width, max_width); - max_height = min(dev_priv->stdu_max_height, max_height); - } - - if (max_width < mode->hdisplay) - return MODE_BAD_HVALUE; - - if (max_height < mode->vdisplay) - return MODE_BAD_VVALUE; + ret = drm_mode_validate_size(mode, dev_priv->texture_max_width, + dev_priv->texture_max_height); + if (ret != MODE_OK) + return ret; if (!vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * assumed_cpp, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 2041c4d48daa..a04e0736318d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -43,7 +43,14 @@ #define vmw_connector_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.connector) - +/* + * Some renderers such as llvmpipe will align the width and height of their + * buffers to match their tile size. We need to keep this in mind when exposing + * modes to userspace so that this possible over-allocation will not exceed + * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the + * tile size of current renderers. 
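To make the possible over-allocation concrete, here is a standalone sketch of the worst-case size check that the new vmw_stdu_connector_mode_valid() below performs, with ALIGN_UP standing in for the kernel's ALIGN macro and an illustrative 1366x768, 32bpp mode:

#include <stdint.h>
#include <stdio.h>

/* round x up to the next multiple of a (a must be a power of two) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t hdisplay = 1366, vdisplay = 768, cpp = 4;   /* example mode only */
	uint64_t tile = 64, page = 4096;
	uint64_t required = ALIGN_UP(hdisplay, tile) * ALIGN_UP(vdisplay, tile) * cpp;

	required = ALIGN_UP(required, page);

	/* 1408 * 768 * 4 = 4325376 bytes, versus 4196352 without tile alignment */
	printf("worst-case framebuffer size: %llu bytes\n", (unsigned long long)required);
	return 0;
}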
+ */ +#define GPU_TILE_SIZE 64 enum stdu_content_type { SAME_AS_DISPLAY = 0, @@ -85,11 +92,6 @@ struct vmw_stdu_update { SVGA3dCmdUpdateGBScreenTarget body; }; -struct vmw_stdu_dma { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA body; -}; - struct vmw_stdu_surface_copy { SVGA3dCmdHeader header; SVGA3dCmdSurfaceCopy body; @@ -414,6 +416,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; + struct drm_crtc_state *new_crtc_state; int ret; if (!crtc) { @@ -423,6 +426,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (dev_priv->vkms_enabled) drm_crtc_vblank_off(crtc); @@ -434,6 +438,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, (void) vmw_stdu_update_st(dev_priv, stdu); + /* Don't destroy the Screen Target if we are only setting the + * display as inactive + */ + if (new_crtc_state->enable && + !new_crtc_state->active && + !new_crtc_state->mode_changed) + return; + ret = vmw_stdu_destroy_st(dev_priv, stdu); if (ret) DRM_ERROR("Failed to destroy Screen Target\n"); @@ -829,7 +841,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector) vmw_stdu_destroy(vmw_connector_to_stdu(connector)); } +static enum drm_mode_status +vmw_stdu_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + enum drm_mode_status ret; + struct drm_device *dev = connector->dev; + struct vmw_private *dev_priv = vmw_priv(dev); + u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4; + /* Align width and height to account for GPU tile over-alignment */ + u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) * + ALIGN(mode->vdisplay, GPU_TILE_SIZE) * + assumed_cpp; + required_mem = ALIGN(required_mem, PAGE_SIZE); + + ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width, + dev_priv->stdu_max_height); + if (ret != MODE_OK) + return ret; + + ret = drm_mode_validate_size(mode, dev_priv->texture_max_width, + dev_priv->texture_max_height); + if (ret != MODE_OK) + return ret; + if (required_mem > dev_priv->max_primary_mem) + return MODE_MEM; + + if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE) + return MODE_MEM; + + if (required_mem > dev_priv->max_mob_size) + return MODE_MEM; + + return MODE_OK; +} static const struct drm_connector_funcs vmw_stdu_connector_funcs = { .dpms = vmw_du_connector_dpms, @@ -845,7 +891,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = { static const struct drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = { .get_modes = vmw_connector_get_modes, - .mode_valid = vmw_connector_mode_valid + .mode_valid = vmw_stdu_connector_mode_valid }; diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 8fc0f3f6ecc5..944770fb2daf 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -147,6 +147,13 @@ static const struct attribute *gt_idle_attrs[] = { static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg) { struct kobject *kobj = arg; + struct xe_gt *gt = kobj_to_gt(kobj->parent); + + if (gt_to_xe(gt)->info.skip_guc_pc) { + XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + xe_gt_idle_disable_c6(gt); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + } sysfs_remove_files(kobj, gt_idle_attrs); kobject_put(kobj); @@ -199,7 +206,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt) void 
xe_gt_idle_disable_c6(struct xe_gt *gt) { xe_device_assert_mem_access(gt_to_xe(gt)); - xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); xe_mmio_write32(gt, PG_ENABLE, 0); xe_mmio_write32(gt, RC_CONTROL, 0); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 79116ad58620..6c2cfc54442c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1274,6 +1274,9 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid) struct xe_tile *tile; unsigned int tid; + xe_assert(xe, IS_DGFX(xe)); + xe_assert(xe, IS_SRIOV_PF(xe)); + for_each_tile(tile, xe, tid) { lmtt = &tile->sriov.pf.lmtt; xe_lmtt_drop_pages(lmtt, vfid); @@ -1292,6 +1295,9 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid) unsigned int tid; int err; + xe_assert(xe, IS_DGFX(xe)); + xe_assert(xe, IS_SRIOV_PF(xe)); + total = 0; for_each_tile(tile, xe, tid) total += pf_get_vf_config_lmem(tile->primary_gt, vfid); @@ -1337,6 +1343,7 @@ fail: static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config) { + xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt))); xe_gt_assert(gt, !xe_gt_is_media_type(gt)); lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); @@ -1355,6 +1362,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) int err; xe_gt_assert(gt, vfid); + xe_gt_assert(gt, IS_DGFX(xe)); xe_gt_assert(gt, !xe_gt_is_media_type(gt)); size = round_up(size, pf_get_lmem_alignment(gt)); @@ -1745,10 +1753,14 @@ static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *c static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid) { struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); + struct xe_device *xe = gt_to_xe(gt); if (!xe_gt_is_media_type(gt)) { pf_release_vf_config_ggtt(gt, config); - pf_release_vf_config_lmem(gt, config); + if (IS_DGFX(xe)) { + pf_release_vf_config_lmem(gt, config); + pf_update_vf_lmtt(xe, vfid); + } } pf_release_config_ctxs(gt, config); pf_release_config_dbs(gt, config); diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 509649d0e65e..23382ced4ea7 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -895,12 +895,6 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc) static void xe_guc_pc_fini(struct drm_device *drm, void *arg) { struct xe_guc_pc *pc = arg; - struct xe_device *xe = pc_to_xe(pc); - - if (xe->info.skip_guc_pc) { - xe_gt_idle_disable_c6(pc_to_gt(pc)); - return; - } XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL)); XE_WARN_ON(xe_guc_pc_gucrc_disable(pc)); diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index d42b3f33bd7a..aca7a9af6e84 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -80,6 +80,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i) return i; } +static int emit_flush_dw(u32 *dw, int i) +{ + dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW; + dw[i++] = 0; + dw[i++] = 0; + dw[i++] = 0; + + return i; +} + static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb, u32 *dw, int i) { @@ -234,10 +244,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc i = emit_bb_start(batch_addr, ppgtt_flag, dw, i); - if (job->user_fence.used) + if (job->user_fence.used) { + i = emit_flush_dw(dw, i); i = 
emit_store_imm_ppgtt_posted(job->user_fence.addr, job->user_fence.value, dw, i); + } i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); @@ -293,10 +305,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, i = emit_bb_start(batch_addr, ppgtt_flag, dw, i); - if (job->user_fence.used) + if (job->user_fence.used) { + i = emit_flush_dw(dw, i); i = emit_store_imm_ppgtt_posted(job->user_fence.addr, job->user_fence.value, dw, i); + } i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 02de2bf4f790..37e6d25593c2 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -1204,8 +1204,8 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, } /* match many more n-key devices */ - if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { - for (int i = 0; i < *rsize + 1; i++) { + if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) { + for (int i = 0; i < *rsize - 15; i++) { /* offset to the count from 0x5a report part always 14 */ if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a && rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index b1fa0378e8f4..74efda212c55 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report, hid_warn(hid, "%s() called with too large value %d (n: %d)! (%s)\n", __func__, value, n, current->comm); - WARN_ON(1); value &= m; } } diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 87a961cae775..d5abfe652fb5 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -3366,6 +3366,8 @@ static const char *keys[KEY_MAX + 1] = { [KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable", [KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable", [KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle", + [KEY_ACCESSIBILITY] = "Accessibility", + [KEY_DO_NOT_DISTURB] = "DoNotDisturb", [KEY_DICTATE] = "Dictate", [KEY_MICMUTE] = "MicrophoneMute", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 61d2a21affa2..72d56ee7ce1b 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -423,6 +423,8 @@ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF #define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8 #define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82 +#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C +#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116 #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index e03d300d2bac..c9094a4f281e 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -377,6 +377,10 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), @@ -833,9 +837,18 @@ 
static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; } + if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/ + switch (usage->hid & 0xf) { + case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break; + default: goto ignore; + } + break; + } + if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */ switch (usage->hid & 0xf) { case 0x9: map_key_clear(KEY_MICMUTE); break; + case 0xa: map_key_clear(KEY_ACCESSIBILITY); break; default: goto ignore; } break; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 3c3c497b6b91..37958edec55f 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, */ msleep(50); - if (retval) + if (retval) { + kfree(dj_report); return retval; + } } /* diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index b81d5bcc76a7..400d70e6dbe2 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -27,6 +27,7 @@ #include "usbhid/usbhid.h" #include "hid-ids.h" +MODULE_DESCRIPTION("Support for Logitech devices relying on the HID++ specification"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>"); diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index b4a97803eca3..3062daf68d31 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -2725,13 +2725,13 @@ static int nintendo_hid_probe(struct hid_device *hdev, ret = joycon_power_supply_create(ctlr); if (ret) { hid_err(hdev, "Failed to create power_supply; ret=%d\n", ret); - goto err_close; + goto err_ida; } ret = joycon_input_create(ctlr); if (ret) { hid_err(hdev, "Failed to create input device; ret=%d\n", ret); - goto err_close; + goto err_ida; } ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; @@ -2739,6 +2739,8 @@ static int nintendo_hid_probe(struct hid_device *hdev, hid_dbg(hdev, "probe - success\n"); return 0; +err_ida: + ida_free(&nintendo_player_id_allocator, ctlr->player_id); err_close: hid_hw_close(hdev); err_stop: diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c index 58b15750dbb0..ff9078ad1961 100644 --- a/drivers/hid/hid-nvidia-shield.c +++ b/drivers/hid/hid-nvidia-shield.c @@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create( return haptics; input_set_capability(haptics, EV_FF, FF_RUMBLE); - input_ff_create_memless(haptics, NULL, play_effect); + ret = input_ff_create_memless(haptics, NULL, play_effect); + if (ret) + goto err; ret = input_register_device(haptics); if (ret) diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c index 5b91fb106cfc..091e37933225 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c @@ -31,6 +31,7 @@ struct i2c_hid_of_elan { struct regulator *vcc33; struct regulator *vccio; struct gpio_desc *reset_gpio; + bool no_reset_on_power_off; const struct elan_i2c_hid_chip_data *chip_data; }; @@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops) container_of(ops, struct i2c_hid_of_elan, ops); int ret; + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + if (ihid_elan->vcc33) { ret = regulator_enable(ihid_elan->vcc33); if (ret) - return ret; + goto err_deassert_reset; } ret = regulator_enable(ihid_elan->vccio); - if (ret) { - regulator_disable(ihid_elan->vcc33); - return ret; - } + 
if (ret) + goto err_disable_vcc33; if (ihid_elan->chip_data->post_power_delay_ms) msleep(ihid_elan->chip_data->post_power_delay_ms); @@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops) msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms); return 0; + +err_disable_vcc33: + if (ihid_elan->vcc33) + regulator_disable(ihid_elan->vcc33); +err_deassert_reset: + if (ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0); + + return ret; } static void elan_i2c_hid_power_down(struct i2chid_ops *ops) @@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops) struct i2c_hid_of_elan *ihid_elan = container_of(ops, struct i2c_hid_of_elan, ops); - gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + /* + * Do not assert reset when the hardware allows for it to remain + * deasserted regardless of the state of the (shared) power supply to + * avoid wasting power when the supply is left on. + */ + if (!ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms) msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms); @@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops) static int i2c_hid_of_elan_probe(struct i2c_client *client) { struct i2c_hid_of_elan *ihid_elan; + int ret; ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL); if (!ihid_elan) @@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client) if (IS_ERR(ihid_elan->reset_gpio)) return PTR_ERR(ihid_elan->reset_gpio); + ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node, + "no-reset-on-power-off"); + ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio"); - if (IS_ERR(ihid_elan->vccio)) - return PTR_ERR(ihid_elan->vccio); + if (IS_ERR(ihid_elan->vccio)) { + ret = PTR_ERR(ihid_elan->vccio); + goto err_deassert_reset; + } ihid_elan->chip_data = device_get_match_data(&client->dev); if (ihid_elan->chip_data->main_supply_name) { ihid_elan->vcc33 = devm_regulator_get(&client->dev, ihid_elan->chip_data->main_supply_name); - if (IS_ERR(ihid_elan->vcc33)) - return PTR_ERR(ihid_elan->vcc33); + if (IS_ERR(ihid_elan->vcc33)) { + ret = PTR_ERR(ihid_elan->vcc33); + goto err_deassert_reset; + } } - return i2c_hid_core_probe(client, &ihid_elan->ops, - ihid_elan->chip_data->hid_descriptor_address, 0); + ret = i2c_hid_core_probe(client, &ihid_elan->ops, + ihid_elan->chip_data->hid_descriptor_address, 0); + if (ret) + goto err_deassert_reset; + + return 0; + +err_deassert_reset: + if (ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0); + + return ret; } static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = { diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c index 993f8b390e57..fcca070bdecb 100644 --- a/drivers/hid/intel-ish-hid/ishtp/loader.c +++ b/drivers/hid/intel-ish-hid/ishtp/loader.c @@ -84,8 +84,8 @@ static int loader_write_message(struct ishtp_device *dev, void *buf, int len) static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len, void *resp, int resp_len) { - struct loader_msg_header *req_hdr = req; - struct loader_msg_header *resp_hdr = resp; + union loader_msg_header req_hdr; + union loader_msg_header resp_hdr; struct device *devc = dev->devc; int rv; @@ -93,34 +93,37 @@ static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len, dev->fw_loader_rx_size = resp_len; 
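The loader.c/loader.h changes starting here replace a message header made of __le32 bitfields (which cannot be byte-swapped field by field) with a union that exposes the header both as CPU-endian bitfields and as one 32-bit word, so only the whole word needs cpu_to_le32()/le32_to_cpu(). A compileable userspace sketch of the idea; the swap helpers are identity stand-ins, assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>

union msg_header {
	struct {
		uint32_t command:7;
		uint32_t is_response:1;
		uint32_t has_next:1;
		uint32_t reserved:15;
		uint32_t status:8;
	};
	uint32_t val32;        /* the only form that crosses the wire */
};

/* stand-ins for cpu_to_le32()/le32_to_cpu() on a little-endian host */
static uint32_t to_le32(uint32_t v)   { return v; }
static uint32_t from_le32(uint32_t v) { return v; }

int main(void)
{
	union msg_header hdr = { .command = 5 };            /* built in CPU-endian form */
	uint32_t wire = to_le32(hdr.val32);                 /* convert the whole word   */
	union msg_header rx  = { .val32 = from_le32(wire) };

	printf("command=%u is_response=%u\n", rx.command, rx.is_response);
	return 0;
}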
rv = loader_write_message(dev, req, req_len); + req_hdr.val32 = le32_to_cpup(req); + if (rv < 0) { - dev_err(devc, "write cmd %u failed:%d\n", req_hdr->command, rv); + dev_err(devc, "write cmd %u failed:%d\n", req_hdr.command, rv); return rv; } /* Wait the ACK */ wait_event_interruptible_timeout(dev->wait_loader_recvd_msg, dev->fw_loader_received, ISHTP_LOADER_TIMEOUT); + resp_hdr.val32 = le32_to_cpup(resp); dev->fw_loader_rx_size = 0; dev->fw_loader_rx_buf = NULL; if (!dev->fw_loader_received) { - dev_err(devc, "wait response of cmd %u timeout\n", req_hdr->command); + dev_err(devc, "wait response of cmd %u timeout\n", req_hdr.command); return -ETIMEDOUT; } - if (!resp_hdr->is_response) { - dev_err(devc, "not a response for %u\n", req_hdr->command); + if (!resp_hdr.is_response) { + dev_err(devc, "not a response for %u\n", req_hdr.command); return -EBADMSG; } - if (req_hdr->command != resp_hdr->command) { - dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr->command, - resp_hdr->command); + if (req_hdr.command != resp_hdr.command) { + dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr.command, + resp_hdr.command); return -EBADMSG; } - if (resp_hdr->status) { - dev_err(devc, "cmd %u failed %u\n", req_hdr->command, resp_hdr->status); + if (resp_hdr.status) { + dev_err(devc, "cmd %u failed %u\n", req_hdr.command, resp_hdr.status); return -EIO; } @@ -138,12 +141,13 @@ static void release_dma_bufs(struct ishtp_device *dev, struct loader_xfer_dma_fragment *fragment, void **dma_bufs, u32 fragment_size) { + dma_addr_t dma_addr; int i; for (i = 0; i < FRAGMENT_MAX_NUM; i++) { if (dma_bufs[i]) { - dma_free_coherent(dev->devc, fragment_size, dma_bufs[i], - fragment->fragment_tbl[i].ddr_adrs); + dma_addr = le64_to_cpu(fragment->fragment_tbl[i].ddr_adrs); + dma_free_coherent(dev->devc, fragment_size, dma_bufs[i], dma_addr); dma_bufs[i] = NULL; } } @@ -156,29 +160,33 @@ static void release_dma_bufs(struct ishtp_device *dev, * @fragment: The ISHTP firmware fragment descriptor * @dma_bufs: The array of DMA fragment buffers * @fragment_size: The size of a single DMA fragment + * @fragment_count: Number of fragments * * Return: 0 on success, negative error code on failure */ static int prepare_dma_bufs(struct ishtp_device *dev, const struct firmware *ish_fw, struct loader_xfer_dma_fragment *fragment, - void **dma_bufs, u32 fragment_size) + void **dma_bufs, u32 fragment_size, u32 fragment_count) { + dma_addr_t dma_addr; u32 offset = 0; + u32 length; int i; - for (i = 0; i < fragment->fragment_cnt && offset < ish_fw->size; i++) { - dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size, - &fragment->fragment_tbl[i].ddr_adrs, GFP_KERNEL); + for (i = 0; i < fragment_count && offset < ish_fw->size; i++) { + dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size, &dma_addr, GFP_KERNEL); if (!dma_bufs[i]) return -ENOMEM; - fragment->fragment_tbl[i].length = clamp(ish_fw->size - offset, 0, fragment_size); - fragment->fragment_tbl[i].fw_off = offset; - memcpy(dma_bufs[i], ish_fw->data + offset, fragment->fragment_tbl[i].length); + fragment->fragment_tbl[i].ddr_adrs = cpu_to_le64(dma_addr); + length = clamp(ish_fw->size - offset, 0, fragment_size); + fragment->fragment_tbl[i].length = cpu_to_le32(length); + fragment->fragment_tbl[i].fw_off = cpu_to_le32(offset); + memcpy(dma_bufs[i], ish_fw->data + offset, length); clflush_cache_range(dma_bufs[i], fragment_size); - offset += fragment->fragment_tbl[i].length; + offset += length; } return 0; @@ -206,17 +214,17 @@ void ishtp_loader_work(struct 
work_struct *work) { DEFINE_RAW_FLEX(struct loader_xfer_dma_fragment, fragment, fragment_tbl, FRAGMENT_MAX_NUM); struct ishtp_device *dev = container_of(work, struct ishtp_device, work_fw_loader); - struct loader_xfer_query query = { - .header.command = LOADER_CMD_XFER_QUERY, - }; - struct loader_start start = { - .header.command = LOADER_CMD_START, - }; + union loader_msg_header query_hdr = { .command = LOADER_CMD_XFER_QUERY, }; + union loader_msg_header start_hdr = { .command = LOADER_CMD_START, }; + union loader_msg_header fragment_hdr = { .command = LOADER_CMD_XFER_FRAGMENT, }; + struct loader_xfer_query query = { .header = cpu_to_le32(query_hdr.val32), }; + struct loader_start start = { .header = cpu_to_le32(start_hdr.val32), }; union loader_recv_message recv_msg; char *filename = dev->driver_data->fw_filename; const struct firmware *ish_fw; void *dma_bufs[FRAGMENT_MAX_NUM] = {}; u32 fragment_size; + u32 fragment_count; int retry = ISHTP_LOADER_RETRY_TIMES; int rv; @@ -226,23 +234,24 @@ void ishtp_loader_work(struct work_struct *work) return; } - fragment->fragment.header.command = LOADER_CMD_XFER_FRAGMENT; - fragment->fragment.xfer_mode = LOADER_XFER_MODE_DMA; - fragment->fragment.is_last = 1; - fragment->fragment.size = ish_fw->size; + fragment->fragment.header = cpu_to_le32(fragment_hdr.val32); + fragment->fragment.xfer_mode = cpu_to_le32(LOADER_XFER_MODE_DMA); + fragment->fragment.is_last = cpu_to_le32(1); + fragment->fragment.size = cpu_to_le32(ish_fw->size); /* Calculate the size of a single DMA fragment */ fragment_size = PFN_ALIGN(DIV_ROUND_UP(ish_fw->size, FRAGMENT_MAX_NUM)); /* Calculate the count of DMA fragments */ - fragment->fragment_cnt = DIV_ROUND_UP(ish_fw->size, fragment_size); + fragment_count = DIV_ROUND_UP(ish_fw->size, fragment_size); + fragment->fragment_cnt = cpu_to_le32(fragment_count); - rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size); + rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size, fragment_count); if (rv) { dev_err(dev->devc, "prepare DMA buffer failed.\n"); goto out; } do { - query.image_size = ish_fw->size; + query.image_size = cpu_to_le32(ish_fw->size); rv = loader_xfer_cmd(dev, &query, sizeof(query), recv_msg.raw_data, sizeof(struct loader_xfer_query_ack)); if (rv) @@ -255,7 +264,7 @@ void ishtp_loader_work(struct work_struct *work) recv_msg.query_ack.version_build); rv = loader_xfer_cmd(dev, fragment, - struct_size(fragment, fragment_tbl, fragment->fragment_cnt), + struct_size(fragment, fragment_tbl, fragment_count), recv_msg.raw_data, sizeof(struct loader_xfer_fragment_ack)); if (rv) continue; /* try again if failed */ diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.h b/drivers/hid/intel-ish-hid/ishtp/loader.h index 7aa45ebc3f7b..308b96085a4d 100644 --- a/drivers/hid/intel-ish-hid/ishtp/loader.h +++ b/drivers/hid/intel-ish-hid/ishtp/loader.h @@ -30,19 +30,23 @@ struct work_struct; #define LOADER_XFER_MODE_DMA BIT(0) /** - * struct loader_msg_header - ISHTP firmware loader message header + * union loader_msg_header - ISHTP firmware loader message header * @command: Command type * @is_response: Indicates if the message is a response * @has_next: Indicates if there is a next message * @reserved: Reserved for future use * @status: Status of the message - */ -struct loader_msg_header { - __le32 command:7; - __le32 is_response:1; - __le32 has_next:1; - __le32 reserved:15; - __le32 status:8; + * @val32: entire header as a 32-bit value + */ +union loader_msg_header { + struct { + __u32 command:7; + __u32 
is_response:1; + __u32 has_next:1; + __u32 reserved:15; + __u32 status:8; + }; + __u32 val32; }; /** @@ -51,7 +55,7 @@ struct loader_msg_header { * @image_size: Size of the image */ struct loader_xfer_query { - struct loader_msg_header header; + __le32 header; __le32 image_size; }; @@ -103,7 +107,7 @@ struct loader_capability { * @capability: Loader capability */ struct loader_xfer_query_ack { - struct loader_msg_header header; + __le32 header; __le16 version_major; __le16 version_minor; __le16 version_hotfix; @@ -122,7 +126,7 @@ struct loader_xfer_query_ack { * @is_last: Is last */ struct loader_xfer_fragment { - struct loader_msg_header header; + __le32 header; __le32 xfer_mode; __le32 offset; __le32 size; @@ -134,7 +138,7 @@ struct loader_xfer_fragment { * @header: Header of the message */ struct loader_xfer_fragment_ack { - struct loader_msg_header header; + __le32 header; }; /** @@ -170,7 +174,7 @@ struct loader_xfer_dma_fragment { * @header: Header of the message */ struct loader_start { - struct loader_msg_header header; + __le32 header; }; /** @@ -178,10 +182,11 @@ struct loader_start { * @header: Header of the message */ struct loader_start_ack { - struct loader_msg_header header; + __le32 header; }; union loader_recv_message { + __le32 header; struct loader_xfer_query_ack query_ack; struct loader_xfer_fragment_ack fragment_ack; struct loader_start_ack start_ack; diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c index d6eeea5166c0..131a67d9d4a6 100644 --- a/drivers/i2c/busses/i2c-at91-slave.c +++ b/drivers/i2c/busses/i2c-at91-slave.c @@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave) static u32 at91_twi_func(struct i2c_adapter *adapter) { - return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL - | I2C_FUNC_SMBUS_READ_BLOCK_DATA; + return I2C_FUNC_SLAVE; } static const struct i2c_algorithm at91_twi_algorithm_slave = { diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 2e079cf20bb5..78e2c47e3d7d 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = { void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { - dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; + dev->functionality = I2C_FUNC_SLAVE; dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index 31ecb2c7e978..4eccbcd0fbfc 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -138,7 +138,6 @@ struct synquacer_i2c { int irq; struct device *dev; void __iomem *base; - struct clk *pclk; u32 pclkrate; u32 speed_khz; u32 timeout_ms; @@ -535,6 +534,7 @@ static const struct i2c_adapter synquacer_i2c_ops = { static int synquacer_i2c_probe(struct platform_device *pdev) { struct synquacer_i2c *i2c; + struct clk *pclk; u32 bus_speed; int ret; @@ -550,13 +550,12 @@ static int synquacer_i2c_probe(struct platform_device *pdev) device_property_read_u32(&pdev->dev, "socionext,pclk-rate", &i2c->pclkrate); - i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); - if (IS_ERR(i2c->pclk)) - return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk), + pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); + if (IS_ERR(pclk)) + return dev_err_probe(&pdev->dev, PTR_ERR(pclk), "failed to get and enable clock\n"); - dev_dbg(&pdev->dev, "clock 
source %p\n", i2c->pclk); - i2c->pclkrate = clk_get_rate(i2c->pclk); + i2c->pclkrate = clk_get_rate(pclk); if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE || i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE) diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index a7826bba0852..b26d4575e256 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -145,6 +145,7 @@ struct ad7173_device_info { unsigned int id; char *name; bool has_temp; + bool has_input_buf; bool has_int_ref; bool has_ref2; u8 num_gpios; @@ -212,18 +213,21 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 4, .num_gpios = 2, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .clock = 2 * HZ_PER_MHZ, .sinc5_data_rates = ad7173_sinc5_data_rates, .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates), }, [ID_AD7172_4] = { + .name = "ad7172-4", .id = AD7172_4_ID, .num_inputs = 9, .num_channels = 8, .num_configs = 8, .num_gpios = 4, .has_temp = false, + .has_input_buf = true, .has_ref2 = true, .clock = 2 * HZ_PER_MHZ, .sinc5_data_rates = ad7173_sinc5_data_rates, @@ -237,6 +241,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 8, .num_gpios = 4, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .has_ref2 = true, .clock = 2 * HZ_PER_MHZ, @@ -251,18 +256,21 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 4, .num_gpios = 2, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .clock = 16 * HZ_PER_MHZ, .sinc5_data_rates = ad7175_sinc5_data_rates, .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates), }, [ID_AD7175_8] = { + .name = "ad7175-8", .id = AD7175_8_ID, .num_inputs = 17, .num_channels = 16, .num_configs = 8, .num_gpios = 4, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .has_ref2 = true, .clock = 16 * HZ_PER_MHZ, @@ -277,18 +285,21 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 4, .num_gpios = 2, .has_temp = false, + .has_input_buf = false, .has_int_ref = true, .clock = 16 * HZ_PER_MHZ, .sinc5_data_rates = ad7175_sinc5_data_rates, .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates), }, [ID_AD7177_2] = { + .name = "ad7177-2", .id = AD7177_ID, .num_inputs = 5, .num_channels = 4, .num_configs = 4, .num_gpios = 2, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .clock = 16 * HZ_PER_MHZ, .odr_start_value = AD7177_ODR_START_VALUE, @@ -532,6 +543,7 @@ static int ad7173_append_status(struct ad_sigma_delta *sd, bool append) unsigned int interface_mode = st->interface_mode; int ret; + interface_mode &= ~AD7173_INTERFACE_DATA_STAT; interface_mode |= AD7173_INTERFACE_DATA_STAT_EN(append); ret = ad_sd_write_reg(&st->sd, AD7173_REG_INTERFACE_MODE, 2, interface_mode); if (ret) @@ -705,7 +717,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev, { struct ad7173_state *st = iio_priv(indio_dev); struct ad7173_channel_config *cfg; - unsigned int freq, i, reg; + unsigned int freq, i; int ret; ret = iio_device_claim_direct_mode(indio_dev); @@ -721,16 +733,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev, cfg = &st->channels[chan->address].cfg; cfg->odr = i; - - if (!cfg->live) - break; - - ret = ad_sd_read_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, ®); - if (ret) - break; - reg &= ~AD7173_FILTER_ODR0_MASK; - reg |= FIELD_PREP(AD7173_FILTER_ODR0_MASK, i); - ret = ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, reg); + cfg->live = false; break; default: @@ -792,8 +795,7 @@ 
static const struct iio_chan_spec ad7173_channel_template = { .type = IIO_VOLTAGE, .indexed = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE), - .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ), .scan_type = { .sign = 'u', .realbits = 24, @@ -804,12 +806,11 @@ static const struct iio_chan_spec ad7173_channel_template = { static const struct iio_chan_spec ad7173_temp_iio_channel_template = { .type = IIO_TEMP, - .indexed = 1, .channel = AD7173_AIN_TEMP_POS, .channel2 = AD7173_AIN_TEMP_NEG, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), - .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), .scan_type = { .sign = 'u', .realbits = 24, @@ -932,7 +933,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev) AD7173_CH_ADDRESS(chan_arr[chan_index].channel, chan_arr[chan_index].channel2); chan_st_priv->cfg.bipolar = false; - chan_st_priv->cfg.input_buf = true; + chan_st_priv->cfg.input_buf = st->info->has_input_buf; chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF; st->adc_mode |= AD7173_ADC_MODE_REF_EN; @@ -989,7 +990,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev) chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]); chan_st_priv->chan_reg = chan_index; - chan_st_priv->cfg.input_buf = true; + chan_st_priv->cfg.input_buf = st->info->has_input_buf; chan_st_priv->cfg.odr = 0; chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar"); diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index e85b763b9ffc..8f5b9c3f6e3d 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -243,11 +243,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index, } static const struct iio_chan_spec ad9434_channels[] = { - AD9467_CHAN(0, 0, 12, 'S'), + AD9467_CHAN(0, 0, 12, 's'), }; static const struct iio_chan_spec ad9467_channels[] = { - AD9467_CHAN(0, 0, 16, 'S'), + AD9467_CHAN(0, 0, 16, 's'), }; static const struct ad9467_chip_info ad9467_chip_tbl = { diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c index fa205f17bd90..f44458c380d9 100644 --- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c +++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c @@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP); int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts, uint32_t period, bool fifo) { + uint32_t mult; + /* when FIFO is on, prevent odr change if one is already pending */ if (fifo && ts->new_mult != 0) return -EAGAIN; - ts->new_mult = period / ts->chip.clock_period; + mult = period / ts->chip.clock_period; + if (mult != ts->mult) + ts->new_mult = mult; return 0; } diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 076bc9ecfb49..4763402dbcd6 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, s64 tmp = *val * (3767897513LL / 25LL); *val = div_s64_rem(tmp, 1000000000LL, val2); - return IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_NANO; } mutex_lock(&st->lock); diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c index 5d42ab9b176a..67d74a1a1b26 100644 --- 
a/drivers/iio/imu/bmi323/bmi323_core.c +++ b/drivers/iio/imu/bmi323/bmi323_core.c @@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p) &data->buffer.channels, ARRAY_SIZE(data->buffer.channels)); if (ret) - return IRQ_NONE; + goto out; } else { for_each_set_bit(bit, indio_dev->active_scan_mask, BMI323_CHAN_MAX) { @@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p) &data->buffer.channels[index++], BMI323_BYTES_PER_SAMPLE); if (ret) - return IRQ_NONE; + goto out; } } iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); +out: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c index 83d8504ebfff..4b2566693614 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c @@ -130,10 +130,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev, /* update data FIFO write */ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en); - if (ret) - goto out_unlock; - - ret = inv_icm42600_buffer_update_watermark(st); out_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c index 63b85ec88c13..a8cf74c84c3c 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -222,10 +222,15 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st) latency_accel = period_accel * wm_accel; /* 0 value for watermark means that the sensor is turned off */ + if (wm_gyro == 0 && wm_accel == 0) + return 0; + if (latency_gyro == 0) { watermark = wm_accel; + st->fifo.watermark.eff_accel = wm_accel; } else if (latency_accel == 0) { watermark = wm_gyro; + st->fifo.watermark.eff_gyro = wm_gyro; } else { /* compute the smallest latency that is a multiple of both */ if (latency_gyro <= latency_accel) @@ -241,6 +246,13 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st) watermark = latency / period; if (watermark < 1) watermark = 1; + /* update effective watermark */ + st->fifo.watermark.eff_gyro = latency / period_gyro; + if (st->fifo.watermark.eff_gyro < 1) + st->fifo.watermark.eff_gyro = 1; + st->fifo.watermark.eff_accel = latency / period_accel; + if (st->fifo.watermark.eff_accel < 1) + st->fifo.watermark.eff_accel = 1; } /* compute watermark value in bytes */ @@ -514,7 +526,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st) /* handle gyroscope timestamp and FIFO data parsing */ if (st->fifo.nb.gyro > 0) { ts = &gyro_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro, + inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_gyro, st->timestamp.gyro); ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro); if (ret) @@ -524,7 +536,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st) /* handle accelerometer timestamp and FIFO data parsing */ if (st->fifo.nb.accel > 0) { ts = &accel_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel, + inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_accel, st->timestamp.accel); ret = inv_icm42600_accel_parse_fifo(st->indio_accel); if (ret) @@ -577,6 +589,9 @@ int inv_icm42600_buffer_init(struct inv_icm42600_state *st) unsigned int val; int ret; + st->fifo.watermark.eff_gyro = 
1; + st->fifo.watermark.eff_accel = 1; + /* * Default FIFO configuration (bits 7 to 5) * - use invalid value diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h index 8b85ee333bf8..f6c85daf42b0 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h @@ -32,6 +32,8 @@ struct inv_icm42600_fifo { struct { unsigned int gyro; unsigned int accel; + unsigned int eff_gyro; + unsigned int eff_accel; } watermark; size_t count; struct { diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index 96116a68ab29..62fdae530334 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -537,6 +537,7 @@ static int inv_icm42600_irq_init(struct inv_icm42600_state *st, int irq, if (ret) return ret; + irq_type |= IRQF_ONESHOT; return devm_request_threaded_irq(dev, irq, inv_icm42600_irq_timestamp, inv_icm42600_irq_handler, irq_type, "inv_icm42600", st); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c index e6f8de80128c..938af5b640b0 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c @@ -130,10 +130,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev, /* update data FIFO write */ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en); - if (ret) - goto out_unlock; - - ret = inv_icm42600_buffer_update_watermark(st); out_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 0dc0f22a5582..3d3b27f28c9d 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -100,8 +100,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) goto end_session; /* Each FIFO data contains all sensors, so same number for FIFO and sensor data */ fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider); - inv_sensors_timestamp_interrupt(&st->timestamp, nb, pf->timestamp); - inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0); + inv_sensors_timestamp_interrupt(&st->timestamp, 1, pf->timestamp); + inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, 1, 0); /* clear internal data buffer for avoiding kernel data leak */ memset(data, 0, sizeof(data)); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c index 1b603567ccc8..84273660ca2e 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -300,6 +300,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type) if (!st->trig) return -ENOMEM; + irq_type |= IRQF_ONESHOT; ret = devm_request_threaded_irq(&indio_dev->dev, st->irq, &inv_mpu6050_interrupt_timestamp, &inv_mpu6050_interrupt_handle, diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 52d773261828..485e6fc44a04 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -721,7 +721,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val, return ret; *val *= scale; - return 0; + return ret; } else { ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW); if (ret < 0) diff --git a/drivers/iio/pressure/bmp280-core.c 
b/drivers/iio/pressure/bmp280-core.c index 09f53d987c7d..221fa2c552ae 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2) /* * Temperature is returned in Celsius degrees in fractional - * form down 2^16. We rescale by x1000 to return milli Celsius - * to respect IIO ABI. + * form down 2^16. We rescale by x1000 to return millidegrees + * Celsius to respect IIO ABI. */ - *val = raw_temp * 1000; - *val2 = 16; - return IIO_VAL_FRACTIONAL_LOG2; + raw_temp = sign_extend32(raw_temp, 23); + *val = ((s64)raw_temp * 1000) / (1 << 16); + return IIO_VAL_INT; } static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2) diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c index 1f5c962c1818..f7f88498ba0e 100644 --- a/drivers/iio/temperature/mlx90635.c +++ b/drivers/iio/temperature/mlx90635.c @@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client) "failed to allocate regmap\n"); regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee); - if (IS_ERR(regmap)) - return dev_err_probe(&client->dev, PTR_ERR(regmap), - "failed to allocate regmap\n"); + if (IS_ERR(regmap_ee)) + return dev_err_probe(&client->dev, PTR_ERR(regmap_ee), + "failed to allocate EEPROM regmap\n"); mlx90635 = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index bbd366dcb69a..6a42b27c4599 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -71,7 +71,6 @@ struct silead_ts_data { struct regulator_bulk_data regulators[2]; char fw_name[64]; struct touchscreen_properties prop; - u32 max_fingers; u32 chip_id; struct input_mt_pos pos[SILEAD_MAX_FINGERS]; int slots[SILEAD_MAX_FINGERS]; @@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data) touchscreen_parse_properties(data->input, true, &data->prop); silead_apply_efi_fw_min_max(data); - input_mt_init_slots(data->input, data->max_fingers, + input_mt_init_slots(data->input, SILEAD_MAX_FINGERS, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK); @@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client) return; } - if (buf[0] > data->max_fingers) { + if (buf[0] > SILEAD_MAX_FINGERS) { dev_warn(dev, "More touches reported then supported %d > %d\n", - buf[0], data->max_fingers); - buf[0] = data->max_fingers; + buf[0], SILEAD_MAX_FINGERS); + buf[0] = SILEAD_MAX_FINGERS; } if (silead_ts_handle_pen_data(data, buf)) @@ -315,7 +314,6 @@ sync: static int silead_ts_init(struct i2c_client *client) { - struct silead_ts_data *data = i2c_get_clientdata(client); int error; error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET, @@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client) usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX); error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR, - data->max_fingers); + SILEAD_MAX_FINGERS); if (error) goto i2c_write_err; usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX); @@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client) const char *str; int error; - error = device_property_read_u32(dev, "silead,max-fingers", - &data->max_fingers); - if (error) { - dev_dbg(dev, "Max fingers read error %d\n", error); - data->max_fingers = 5; /* Most devices handle up-to 5 fingers */ - } - error = 
device_property_read_string(dev, "firmware-name", &str); if (!error) snprintf(data->fw_name, sizeof(data->fw_name), diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 2fde1302a584..2d5945c982bd 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -129,7 +129,8 @@ static inline int check_feature_gpt_level(void) static inline bool amd_iommu_gt_ppr_supported(void) { return (check_feature(FEATURE_GT) && - check_feature(FEATURE_PPR)); + check_feature(FEATURE_PPR) && + check_feature(FEATURE_EPHSUP)); } static inline u64 iommu_virt_to_phys(void *vaddr) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index a18e74878f68..161248067776 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1626,8 +1626,17 @@ static void __init free_pci_segments(void) } } +static void __init free_sysfs(struct amd_iommu *iommu) +{ + if (iommu->iommu.dev) { + iommu_device_unregister(&iommu->iommu); + iommu_device_sysfs_remove(&iommu->iommu); + } +} + static void __init free_iommu_one(struct amd_iommu *iommu) { + free_sysfs(iommu); free_cwwb_sem(iommu); free_command_buffer(iommu); free_event_buffer(iommu); @@ -3353,7 +3362,7 @@ int amd_iommu_reenable(int mode) return 0; } -int __init amd_iommu_enable_faulting(unsigned int cpu) +int amd_iommu_enable_faulting(unsigned int cpu) { /* We enable MSI later when PCI is initialized */ return 0; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 52d83730a22a..c2703599bb16 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2032,7 +2032,6 @@ static int do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); - struct pci_dev *pdev; int ret = 0; /* Update data structures */ @@ -2047,30 +2046,13 @@ static int do_attach(struct iommu_dev_data *dev_data, domain->dev_iommu[iommu->index] += 1; domain->dev_cnt += 1; - pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; + /* Setup GCR3 table */ if (pdom_is_sva_capable(domain)) { ret = init_gcr3_table(dev_data, domain); if (ret) return ret; - - if (pdev) { - pdev_enable_caps(pdev); - - /* - * Device can continue to function even if IOPF - * enablement failed. Hence in error path just - * disable device PRI support. 
- */ - if (amd_iommu_iopf_add_device(iommu, dev_data)) - pdev_disable_cap_pri(pdev); - } - } else if (pdev) { - pdev_enable_cap_ats(pdev); } - /* Update device table */ - amd_iommu_dev_update_dte(dev_data, true); - return ret; } @@ -2163,6 +2145,11 @@ static void detach_device(struct device *dev) do_detach(dev_data); +out: + spin_unlock(&dev_data->lock); + + spin_unlock_irqrestore(&domain->lock, flags); + /* Remove IOPF handler */ if (ppr) amd_iommu_iopf_remove_device(iommu, dev_data); @@ -2170,10 +2157,6 @@ static void detach_device(struct device *dev) if (dev_is_pci(dev)) pdev_disable_caps(to_pci_dev(dev)); -out: - spin_unlock(&dev_data->lock); - - spin_unlock_irqrestore(&domain->lock, flags); } static struct iommu_device *amd_iommu_probe_device(struct device *dev) @@ -2485,6 +2468,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); struct protection_domain *domain = to_pdomain(dom); struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); + struct pci_dev *pdev; int ret; /* @@ -2517,7 +2501,23 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, } #endif - iommu_completion_wait(iommu); + pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; + if (pdev && pdom_is_sva_capable(domain)) { + pdev_enable_caps(pdev); + + /* + * Device can continue to function even if IOPF + * enablement failed. Hence in error path just + * disable device PRI support. + */ + if (amd_iommu_iopf_add_device(iommu, dev_data)) + pdev_disable_cap_pri(pdev); + } else if (pdev) { + pdev_enable_cap_ats(pdev); + } + + /* Update device table */ + amd_iommu_dev_update_dte(dev_data, true); return ret; } diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c index 091423bb8aac..7c67d69f0b8c 100644 --- a/drivers/iommu/amd/ppr.c +++ b/drivers/iommu/amd/ppr.c @@ -222,8 +222,7 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu) if (iommu->iopf_queue) return ret; - snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), - "amdiommu-%#x-iopfq", + snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x", PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid)); iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name); @@ -249,40 +248,26 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt, int amd_iommu_iopf_add_device(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) { - unsigned long flags; int ret = 0; if (!dev_data->pri_enabled) return ret; - raw_spin_lock_irqsave(&iommu->lock, flags); - - if (!iommu->iopf_queue) { - ret = -EINVAL; - goto out_unlock; - } + if (!iommu->iopf_queue) + return -EINVAL; ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev); if (ret) - goto out_unlock; + return ret; dev_data->ppr = true; - -out_unlock: - raw_spin_unlock_irqrestore(&iommu->lock, flags); - return ret; + return 0; } /* Its assumed that caller has verified that device was added to iopf queue */ void amd_iommu_iopf_remove_device(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) { - unsigned long flags; - - raw_spin_lock_irqsave(&iommu->lock, flags); - iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev); dev_data->ppr = false; - - raw_spin_unlock_irqrestore(&iommu->lock, flags); } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index f731e4b2a417..43520e7275cc 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -686,15 +686,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev /* Check the domain allows 
at least some access to the device... */ if (map) { - dma_addr_t base = dma_range_map_min(map); - if (base > domain->geometry.aperture_end || + if (dma_range_map_min(map) > domain->geometry.aperture_end || dma_range_map_max(map) < domain->geometry.aperture_start) { pr_warn("specified DMA range outside IOMMU capability\n"); return -EFAULT; } - /* ...then finally give it a kicking to make sure it fits */ - base_pfn = max(base, domain->geometry.aperture_start) >> order; } + /* ...then finally give it a kicking to make sure it fits */ + base_pfn = max_t(unsigned long, base_pfn, + domain->geometry.aperture_start >> order); /* start_pfn is always nonzero for an already-initialised domain */ mutex_lock(&cookie->mutex); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 40ebf1726393..3c755d5dad6e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1846,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; if (!info->map) return -EINVAL; - raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm) { struct its_vlpi_map *maps; maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), GFP_ATOMIC); - if (!maps) { - ret = -ENOMEM; - goto out; - } + if (!maps) + return -ENOMEM; its_dev->event_map.vm = info->map->vm; its_dev->event_map.vlpi_maps = maps; } else if (its_dev->event_map.vm != info->map->vm) { - ret = -EINVAL; - goto out; + return -EINVAL; } /* Get our private copy of the mapping information */ @@ -1899,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) its_dev->event_map.nr_vlpis++; } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_vlpi_map *map; - int ret = 0; - - raw_spin_lock(&its_dev->event_map.vlpi_lock); map = get_vlpi_map(d); - if (!its_dev->event_map.vm || !map) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !map) + return -EINVAL; /* Copy our mapping information to the incoming request */ *info->map = *map; -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_unmap(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; - - raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; /* Drop the virtual mapping */ its_send_discard(its_dev, event); @@ -1962,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d) kfree(its_dev->event_map.vlpi_maps); } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) @@ -1992,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) if (!is_v4(its_dev->its)) return -EINVAL; + guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock); + /* Unmap request? 
*/ if (!info) return its_vlpi_unmap(d); diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index 9e71c4428814..4f3a12383a1e 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -253,8 +253,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init); static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { - struct fwnode_handle *fn; struct acpi_madt_rintc *rintc; + struct fwnode_handle *fn; + int rc; rintc = (struct acpi_madt_rintc *)header; @@ -273,7 +274,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, return -ENOMEM; } - return riscv_intc_init_common(fn, &riscv_intc_chip); + rc = riscv_intc_init_common(fn, &riscv_intc_chip); + if (rc) + irq_domain_free_fwnode(fn); + + return rc; } IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL, diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 8fb183ced1e7..9e22f7e378f5 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -85,7 +85,7 @@ struct plic_handler { struct plic_priv *priv; }; static int plic_parent_irq __ro_after_init; -static bool plic_cpuhp_setup_done __ro_after_init; +static bool plic_global_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); static int plic_irq_set_type(struct irq_data *d, unsigned int type); @@ -487,10 +487,8 @@ static int plic_probe(struct platform_device *pdev) unsigned long plic_quirks = 0; struct plic_handler *handler; u32 nr_irqs, parent_hwirq; - struct irq_domain *domain; struct plic_priv *priv; irq_hw_number_t hwirq; - bool cpuhp_setup; if (is_of_node(dev->fwnode)) { const struct of_device_id *id; @@ -549,14 +547,6 @@ static int plic_probe(struct platform_device *pdev) continue; } - /* Find parent domain and register chained handler */ - domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); - if (!plic_parent_irq && domain) { - plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); - if (plic_parent_irq) - irq_set_chained_handler(plic_parent_irq, plic_handle_irq); - } - /* * When running in M-mode we need to ignore the S-mode handler. * Here we assume it always comes later, but that might be a @@ -597,25 +587,35 @@ done: goto fail_cleanup_contexts; /* - * We can have multiple PLIC instances so setup cpuhp state + * We can have multiple PLIC instances so setup global state * and register syscore operations only once after context * handlers of all online CPUs are initialized. 
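
As an aside on the PLIC hunk above: with several PLIC instances probing independently, the chained handler, the cpuhp state and the syscore ops must be installed exactly once, and only after every online CPU has a context handler. A stand-alone sketch of that gating, with hypothetical names (maybe_do_global_setup, handler_present, NR_CPUS) and plain C in place of the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool handler_present[NR_CPUS];
static bool global_setup_done;

/* Called at the end of each instance's probe. */
static void maybe_do_global_setup(void)
{
	if (global_setup_done)
		return;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (!handler_present[cpu])
			return;	/* a later probe will finish the job */
	puts("registering chained handler, cpuhp state and syscore ops");
	global_setup_done = true;
}

int main(void)
{
	/* two instances each bring up half the CPUs */
	handler_present[0] = handler_present[1] = true;
	maybe_do_global_setup();	/* too early, does nothing */
	handler_present[2] = handler_present[3] = true;
	maybe_do_global_setup();	/* now performs the one-time setup */
	return 0;
}

Calling it from every instance's probe is safe; only the call that finds all handlers present performs the registration.
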
*/ - if (!plic_cpuhp_setup_done) { - cpuhp_setup = true; + if (!plic_global_setup_done) { + struct irq_domain *domain; + bool global_setup = true; + for_each_online_cpu(cpu) { handler = per_cpu_ptr(&plic_handlers, cpu); if (!handler->present) { - cpuhp_setup = false; + global_setup = false; break; } } - if (cpuhp_setup) { + + if (global_setup) { + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (domain) + plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (plic_parent_irq) + irq_set_chained_handler(plic_parent_irq, plic_handle_irq); + cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, "irqchip/sifive/plic:starting", plic_starting_cpu, plic_dying_cpu); register_syscore_ops(&plic_irq_syscore_ops); - plic_cpuhp_setup_done = true; + plic_global_setup_done = true; } } diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 24fcff682b24..ba1be15cfd8e 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent, led_init_core(led_cdev); #ifdef CONFIG_LEDS_TRIGGERS - /* - * If no default trigger was given and hw_control_trigger is set, - * make it the default trigger. - */ - if (!led_cdev->default_trigger && led_cdev->hw_control_trigger) - led_cdev->default_trigger = led_cdev->hw_control_trigger; led_trigger_set_default(led_cdev); #endif diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c index 40a8ebfcfce2..4bd4e324abc9 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c @@ -301,10 +301,10 @@ static int ipu6_isys_stream_start(struct ipu6_isys_video *av, out_requeue: if (bl && bl->nbufs) ipu6_isys_buffer_list_queue(bl, - (IPU6_ISYS_BUFFER_LIST_FL_INCOMING | - error) ? + IPU6_ISYS_BUFFER_LIST_FL_INCOMING | + (error ? IPU6_ISYS_BUFFER_LIST_FL_SET_STATE : - 0, error ? VB2_BUF_STATE_ERROR : + 0), error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_QUEUED); flush_firmware_streamon_fail(stream); diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c index 5992138c7290..8b9b77719bb1 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c @@ -678,6 +678,12 @@ static int isys_notifier_bound(struct v4l2_async_notifier *notifier, container_of(asc, struct sensor_async_sd, asc); int ret; + if (s_asd->csi2.port >= isys->pdata->ipdata->csi2.nports) { + dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", + s_asd->csi2.port); + return -EINVAL; + } + ret = ipu_bridge_instantiate_vcm(sd->dev); if (ret) { dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n"); @@ -925,39 +931,18 @@ static const struct dev_pm_ops isys_pm_ops = { .resume = isys_resume, }; -static void isys_remove(struct auxiliary_device *auxdev) +static void free_fw_msg_bufs(struct ipu6_isys *isys) { - struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); - struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); - struct ipu6_device *isp = adev->isp; + struct device *dev = &isys->adev->auxdev.dev; struct isys_fw_msgs *fwmsg, *safe; - unsigned int i; list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) - dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), - fwmsg, fwmsg->dma_addr, 0); + dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) - dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), - fwmsg, fwmsg->dma_addr, 0); - - isys_unregister_devices(isys); - isys_notifier_cleanup(isys); - - cpu_latency_qos_remove_request(&isys->pm_qos); - - if (!isp->secure_mode) { - ipu6_cpd_free_pkg_dir(adev); - ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); - release_firmware(adev->fw); - } - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) - mutex_destroy(&isys->streams[i].mutex); - - isys_iwake_watermark_cleanup(isys); - mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); + dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); } static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount) @@ -1140,12 +1125,14 @@ static int isys_probe(struct auxiliary_device *auxdev, ret = isys_register_devices(isys); if (ret) - goto out_remove_pkg_dir_shared_buffer; + goto free_fw_msg_bufs; ipu6_mmu_hw_cleanup(adev->mmu); return 0; +free_fw_msg_bufs: + free_fw_msg_bufs(isys); out_remove_pkg_dir_shared_buffer: if (!isp->secure_mode) ipu6_cpd_free_pkg_dir(adev); @@ -1167,6 +1154,34 @@ release_firmware: return ret; } +static void isys_remove(struct auxiliary_device *auxdev) +{ + struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); + struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); + struct ipu6_device *isp = adev->isp; + unsigned int i; + + free_fw_msg_bufs(isys); + + isys_unregister_devices(isys); + isys_notifier_cleanup(isys); + + cpu_latency_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu6_cpd_free_pkg_dir(adev); + ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); + release_firmware(adev->fw); + } + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) + mutex_destroy(&isys->streams[i].mutex); + + isys_iwake_watermark_cleanup(isys); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); +} + struct fwmsg { int type; char *msg; diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index d2bebd208461..bbd646378ab3 100644 --- 
a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -285,7 +285,7 @@ EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6); #define IPU6_ISYS_CSI2_NPORTS 4 #define IPU6SE_ISYS_CSI2_NPORTS 4 #define IPU6_TGL_ISYS_CSI2_NPORTS 8 -#define IPU6EP_MTL_ISYS_CSI2_NPORTS 4 +#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6 static void ipu6_internal_pdata_init(struct ipu6_device *isp) { @@ -727,9 +727,6 @@ static void ipu6_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - release_firmware(isp->cpd_fw); ipu6_mmu_cleanup(psys_mmu); diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index 89b582a221ab..f04a89584334 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -677,10 +677,13 @@ static int mei_csi_probe(struct mei_cl_device *cldev, return -ENODEV; ret = ipu_bridge_init(&ipu->dev, ipu_bridge_parse_ssdb); + put_device(&ipu->dev); if (ret < 0) return ret; - if (WARN_ON(!dev_fwnode(dev))) + if (!dev_fwnode(dev)) { + dev_err(dev, "mei-csi probed without device fwnode!\n"); return -ENXIO; + } csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL); if (!csi) diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c index 60498a5abebf..ab4f07e2e560 100644 --- a/drivers/media/pci/mgb4/mgb4_core.c +++ b/drivers/media/pci/mgb4/mgb4_core.c @@ -642,9 +642,6 @@ static void mgb4_remove(struct pci_dev *pdev) struct mgb4_dev *mgbdev = pci_get_drvdata(pdev); int i; -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(mgbdev->debugfs); -#endif #if IS_REACHABLE(CONFIG_HWMON) hwmon_device_unregister(mgbdev->hwmon_dev); #endif @@ -659,6 +656,10 @@ static void mgb4_remove(struct pci_dev *pdev) if (mgbdev->vin[i]) mgb4_vin_free(mgbdev->vin[i]); +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mgbdev->debugfs); +#endif + device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups); free_spi(mgbdev); free_i2c(mgbdev); diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c index 1280696f65f2..e80fb4ebfda6 100644 --- a/drivers/media/pci/saa7134/saa7134-cards.c +++ b/drivers/media/pci/saa7134/saa7134-cards.c @@ -5152,7 +5152,7 @@ struct saa7134_board saa7134_boards[] = { }, }, [SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = { - /* Andy Shevchenko <andy@smile.org.ua> */ + /* Andy Shevchenko <andy@kernel.org> */ .name = "Avermedia AVerTV Studio 507UA", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */ diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c index 32af2b14ff34..34c9be437432 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c @@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]), GFP_KERNEL); - if (!aux_bus->aux_device_wrapper[1]) - return -ENOMEM; + if (!aux_bus->aux_device_wrapper[1]) { + retval = -ENOMEM; + goto err_aux_dev_add_0; + } retval = ida_alloc(&gp_client_ida, GFP_KERNEL); if (retval < 0) @@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id err_aux_dev_add_1: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev); + goto err_aux_dev_add_0; err_aux_dev_init_1: ida_free(&gp_client_ida, 
aux_bus->aux_device_wrapper[1]->aux_dev.id); @@ -120,6 +123,7 @@ err_ida_alloc_1: err_aux_dev_add_0: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev); + goto err_ret; err_aux_dev_init_0: ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id); @@ -127,6 +131,7 @@ err_aux_dev_init_0: err_ida_alloc_0: kfree(aux_bus->aux_device_wrapper[0]); +err_ret: return retval; } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 79e6f3c1341f..40c3fe26f76d 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, } if (!mei_cl_is_connected(cl)) { - cl_err(dev, cl, "is not connected"); + cl_dbg(dev, cl, "is not connected"); rets = -ENODEV; goto out; } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 7f59dd38c32f..6589635f8ba3 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device) } err = mei_restart(dev); - if (err) + if (err) { + free_irq(pdev->irq, dev); return err; + } /* Start timer if stopped in suspend */ schedule_delayed_work(&dev->timer_work, HZ); diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index b543e6b9f3cf..1ec65d87488a 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev) static int mei_vsc_suspend(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct mei_device *mei_dev; + int ret = 0; - mei_stop(mei_dev); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; - mei_disable_interrupts(mei_dev); + mutex_lock(&mei_dev->device_lock); - vsc_tp_free_irq(hw->tp); + if (!mei_write_is_idle(mei_dev)) + ret = -EAGAIN; - return 0; + mutex_unlock(&mei_dev->device_lock); + + return ret; } static int mei_vsc_resume(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); - int ret; - - ret = vsc_tp_request_irq(hw->tp); - if (ret) - return ret; - - ret = mei_restart(mei_dev); - if (ret) - goto err_free; + struct mei_device *mei_dev; - /* start timer if stopped in suspend */ - schedule_delayed_work(&mei_dev->timer_work, HZ); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; return 0; - -err_free: - vsc_tp_free_irq(hw->tp); - - return ret; } static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c index ffa4ccd96a10..596a9d695dfc 100644 --- a/drivers/misc/mei/vsc-fw-loader.c +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader, { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER }; union acpi_object obj = { - .type = ACPI_TYPE_INTEGER, + .integer.type = ACPI_TYPE_INTEGER, .integer.value = 1, }; struct acpi_object_list arg_list = { diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c index 811ebeeff4ed..43ac68052baf 100644 --- a/drivers/net/dsa/qca/qca8k-leds.c +++ b/drivers/net/dsa/qca/qca8k-leds.c @@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", priv->internal_mdio_bus->id, port_num); - if (!init_data.devicename) + if 
(!init_data.devicename) { + fwnode_handle_put(led); + fwnode_handle_put(leds); return -ENOMEM; + } ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data); if (ret) @@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p kfree(init_data.devicename); } + fwnode_handle_put(leds); return 0; } @@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv) * the correct port for LED setup. */ ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num)); - if (ret) + if (ret) { + fwnode_handle_put(port); + fwnode_handle_put(ports); return ret; + } } + fwnode_handle_put(ports); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 656ab81c0272..bbc7edccd5a4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1434,6 +1434,57 @@ struct bnxt_l2_filter { atomic_t refcnt; }; +/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The + * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h. + * The last valid byte in the compat version is different. + */ +struct hwrm_port_phy_qcfg_output_compat { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + u8 active_fec_signal_mode; + __le16 link_speed; + u8 duplex_cfg; + u8 pause; + __le16 support_speeds; + __le16 force_link_speed; + u8 auto_mode; + u8 auto_pause; + __le16 auto_link_speed; + __le16 auto_link_speed_mask; + u8 wirespeed; + u8 lpbk; + u8 force_pause; + u8 module_status; + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + u8 media_type; + u8 xcvr_pkg_type; + u8 eee_config_phy_addr; + u8 parallel_detect; + __le16 link_partner_adv_speeds; + u8 link_partner_adv_auto_mode; + u8 link_partner_adv_pause; + __le16 adv_eee_link_speed_mask; + __le16 link_partner_adv_eee_link_speed_mask; + __le32 xcvr_identifier_type_tx_lpi_timer; + __le16 fec_cfg; + u8 duplex_state; + u8 option_flags; + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le16 support_pam4_speeds; + __le16 force_pam4_link_speed; + __le16 auto_pam4_link_speed_mask; + u8 link_partner_pam4_adv_speeds; + u8 valid; +}; + struct bnxt_link_info { u8 phy_type; u8 media_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index 1df3d56cc4b5..d2fd2d04ed47 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) req_type); else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE) hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - req_type, token->seq_id, rc); + req_type, le16_to_cpu(ctx->req->seq_id), rc); rc = __hwrm_to_stderr(rc); exit: if (token) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 175192ebaa77..22898d3d088b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_fwd_resp_input *req; int rc; - if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) + if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) { + netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n", + msg_size); return -EINVAL; + } rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); if (!rc) { @@ -1085,7 +1088,7 @@ 
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) rc = bnxt_hwrm_exec_fwd_resp( bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); } else { - struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0}; + struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {}; struct hwrm_port_phy_qcfg_input *phy_qcfg_req; phy_qcfg_req = @@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) mutex_unlock(&bp->link_lock); phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + /* New SPEEDS2 fields are beyond the legacy structure, so + * clear the SPEEDS2_SUPPORTED flag. + */ + phy_qcfg_resp.option_flags &= + ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED; phy_qcfg_resp.valid = 1; if (vf->flags & BNXT_VF_LINK_UP) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index 96c6ea12279f..989b4ddae342 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct, pg_info->page_offset; memcpy(skb->data, va, MIN_SKB_SIZE); skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); } else { struct octeon_skb_page_info *pg_info = ((struct octeon_skb_page_info *)(skb->cb)); diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index c1c912de59c7..1154c1d8f66f 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -647,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb, skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type); } -static void gve_rx_free_skb(struct gve_rx_ring *rx) +static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) { if (!rx->ctx.skb_head) return; + if (rx->ctx.skb_head == napi->skb) + napi->skb = NULL; dev_kfree_skb_any(rx->ctx.skb_head); rx->ctx.skb_head = NULL; rx->ctx.skb_tail = NULL; @@ -950,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num); if (err < 0) { - gve_rx_free_skb(rx); + gve_rx_free_skb(napi, rx); u64_stats_update_begin(&rx->statss); if (err == -ENOMEM) rx->rx_skb_alloc_fail++; @@ -993,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) /* gve_rx_complete_skb() will consume skb if successful */ if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { - gve_rx_free_skb(rx); + gve_rx_free_skb(napi, rx); u64_stats_update_begin(&rx->statss); rx->rx_desc_err_dropped_pkt++; u64_stats_update_end(&rx->statss); diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index fe1b26a4d736..0b3cca3fc792 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb) if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO)) return -1; + if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) + return -EINVAL; + /* Needed because we will modify header. 
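
A brief note on gve_prep_tso() here: the csum_replace_by_diff() call just below drops the payload length from the TCP pseudo-header checksum before segmentation. The identity it relies on is plain ones' complement arithmetic: removing a term is the same as adding that term's complement and refolding. A toy stand-alone illustration with arbitrary sample values (fold16 is a hypothetical helper, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit accumulator into a 16-bit ones' complement sum. */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t addr_hi = 0xc0a8, addr_lo = 0x0001, proto = 0x0006;
	uint16_t paylen = 1400;	/* the term being dropped, as in TSO prep */

	/* sum over all terms, then the same sum computed without paylen */
	uint16_t with_len = fold16((uint32_t)addr_hi + addr_lo + proto + paylen);
	uint16_t without  = fold16((uint32_t)addr_hi + addr_lo + proto);

	/* removing a term == adding its ones' complement and refolding */
	uint16_t removed  = fold16((uint32_t)with_len + (uint16_t)~paylen);

	printf("without=0x%04x removed=0x%04x\n", without, removed);
	return 0;
}

Both printouts match (0xc0af with these values), which is why a single in-place adjustment of the stored checksum is enough and no full recomputation is needed.
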
*/ err = skb_cow_head(skb, 0); if (err < 0) return err; tcp = tcp_hdr(skb); - - /* Remove payload length from checksum. */ paylen = skb->len - skb_transport_offset(skb); - - switch (skb_shinfo(skb)->gso_type) { - case SKB_GSO_TCPV4: - case SKB_GSO_TCPV6: - csum_replace_by_diff(&tcp->check, - (__force __wsum)htonl(paylen)); - - /* Compute length of segmentation header. */ - header_len = skb_tcp_all_headers(skb); - break; - default: - return -EINVAL; - } + csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen)); + header_len = skb_tcp_all_headers(skb); if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO)) return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ff71fb1eced9..a5fc0209d628 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3535,6 +3535,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) ret = hns3_alloc_and_attach_buffer(ring, i); if (ret) goto out_buffer_fail; + + if (!(i % HNS3_RESCHED_BD_NUM)) + cond_resched(); } return 0; @@ -5107,6 +5110,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) } u64_stats_init(&priv->ring[i].syncp); + cond_resched(); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index acd756b0c7c9..d36c4ed16d8d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -214,6 +214,8 @@ enum hns3_nic_state { #define HNS3_CQ_MODE_EQE 1U #define HNS3_CQ_MODE_CQE 0U +#define HNS3_RESCHED_BD_NUM 1024 + enum hns3_pkt_l2t_type { HNS3_L2_TYPE_UNICAST, HNS3_L2_TYPE_MULTICAST, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 43cc6ee4d87d..82574ce0194f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3086,9 +3086,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev) static void hclge_update_link_status(struct hclge_dev *hdev) { - struct hnae3_handle *rhandle = &hdev->vport[0].roce; struct hnae3_handle *handle = &hdev->vport[0].nic; - struct hnae3_client *rclient = hdev->roce_client; struct hnae3_client *client = hdev->nic_client; int state; int ret; @@ -3112,8 +3110,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev) client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); - if (rclient && rclient->ops->link_status_change) - rclient->ops->link_status_change(rhandle, state); + + if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) { + struct hnae3_handle *rhandle = &hdev->vport[0].roce; + struct hnae3_client *rclient = hdev->roce_client; + + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, + state); + } hclge_push_link_status(hdev); } @@ -11319,6 +11324,12 @@ clear_roce: return ret; } +static bool hclge_uninit_need_wait(struct hclge_dev *hdev) +{ + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); +} + static void hclge_uninit_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -11327,7 +11338,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, if (hdev->roce_client) { clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); - while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + while 
(hclge_uninit_need_wait(hdev)) msleep(HCLGE_WAIT_RESET_DONE); hdev->roce_client->ops->uninit_instance(&vport->roce, 0); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 6ad8002b22e1..99a75a59078e 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -409,7 +409,6 @@ struct ice_vsi { struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; struct ice_tx_ring **xdp_rings; /* XDP ring array */ - unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -747,6 +746,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) } /** + * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID + * @vsi: pointer to VSI + * @qid: index of a queue to look at XSK buff pool presence + * + * Return: A pointer to xsk_buff_pool structure if there is a buffer pool + * attached and configured as zero-copy, NULL otherwise. + */ +static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi, + u16 qid) +{ + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid); + + if (!ice_is_xdp_ena_vsi(vsi)) + return NULL; + + return (pool && pool->dev) ? pool : NULL; +} + +/** * ice_xsk_pool - get XSK buffer pool bound to a ring * @ring: Rx ring to use * @@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) struct ice_vsi *vsi = ring->vsi; u16 qid = ring->q_index; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) - return NULL; - - return xsk_get_pool_from_qid(vsi->netdev, qid); + return ice_get_xp_from_qid(vsi, qid); } /** @@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid) if (!ring) return; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) { - ring->xsk_pool = NULL; - return; - } - - ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid); + ring->xsk_pool = ice_get_xp_from_qid(vsi, qid); } /** @@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi); int ice_down_up(struct ice_vsi *vsi); int ice_vsi_cfg_lan(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); + +enum ice_xdp_cfg { + ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */ + ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */ +}; + int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); -int ice_destroy_xdp_rings(struct ice_vsi *vsi); +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type); +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type); +void ice_map_xdp_rings(struct ice_vsi *vsi); int ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 687f6cb2b917..5d396c1a7731 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) } rx_rings_rem -= rx_rings_per_v; } + + if (ice_is_xdp_ena_vsi(vsi)) + ice_map_xdp_rings(vsi); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 5371e91f6bbb..7629b0190578 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -114,14 
+114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->q_vectors) goto err_vectors; - vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); - if (!vsi->af_xdp_zc_qps) - goto err_zc_qps; - return 0; -err_zc_qps: - devm_kfree(dev, vsi->q_vectors); err_vectors: devm_kfree(dev, vsi->rxq_map); err_rxq_map: @@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); - bitmap_free(vsi->af_xdp_zc_qps); - vsi->af_xdp_zc_qps = NULL; /* free the ring and vector containers */ devm_kfree(dev, vsi->q_vectors); vsi->q_vectors = NULL; @@ -2282,22 +2274,23 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi) if (ret) goto unroll_vector_base; - ice_vsi_map_rings_to_vectors(vsi); - - /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi); - - vsi->stat_offsets_loaded = false; - if (ice_is_xdp_ena_vsi(vsi)) { ret = ice_vsi_determine_xdp_res(vsi); if (ret) goto unroll_vector_base; - ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); + ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, + ICE_XDP_CFG_PART); if (ret) goto unroll_vector_base; } + ice_vsi_map_rings_to_vectors(vsi); + + /* Associate q_vector rings to napi */ + ice_vsi_set_napi_queues(vsi); + + vsi->stat_offsets_loaded = false; + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ if (vsi->type != ICE_VSI_CTRL) /* Do not exit if configuring RSS had an issue, at @@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi) /* return value check can be skipped here, it always returns * 0 if reset is in progress */ - ice_destroy_xdp_rings(vsi); + ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); ice_vsi_clear_rings(vsi); ice_vsi_free_q_vectors(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f60c022f7960..1b61ca3a6eb6 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2707,17 +2707,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) bpf_prog_put(old_prog); } +static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) +{ + struct ice_q_vector *q_vector; + struct ice_tx_ring *ring; + + if (static_key_enabled(&ice_xdp_locking_key)) + return vsi->xdp_rings[qid % vsi->num_xdp_txq]; + + q_vector = vsi->rx_rings[qid]->q_vector; + ice_for_each_tx_ring(ring, q_vector->tx) + if (ice_ring_is_xdp(ring)) + return ring; + + return NULL; +} + +/** + * ice_map_xdp_rings - Map XDP rings to interrupt vectors + * @vsi: the VSI with XDP rings being configured + * + * Map XDP rings to interrupt vectors and perform the configuration steps + * dependent on the mapping. 
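
For reference on ice_map_xdp_rings() below: it follows the same logic as ice_vsi_map_rings_to_vectors() and hands each interrupt vector a DIV_ROUND_UP share of the remaining XDP Tx rings, so the earlier vectors absorb the remainder. A minimal user-space sketch of just that bookkeeping (hypothetical stand-alone code, only the arithmetic is taken from the hunk):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_xdp_txq = 10, num_q_vectors = 4;
	int rings_rem = num_xdp_txq;

	for (int v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		int per_v = DIV_ROUND_UP(rings_rem, num_q_vectors - v_idx);
		int base = num_xdp_txq - rings_rem;

		printf("vector %d gets rings %d..%d\n",
		       v_idx, base, base + per_v - 1);
		rings_rem -= per_v;
	}
	return 0;
}

With 10 rings over 4 vectors this prints shares of 3, 3, 2 and 2, and the shares always sum to the ring count.
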
+ */ +void ice_map_xdp_rings(struct ice_vsi *vsi) +{ + int xdp_rings_rem = vsi->num_xdp_txq; + int v_idx, q_idx; + + /* follow the logic from ice_vsi_map_rings_to_vectors */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + int xdp_rings_per_v, q_id, q_base; + + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, + vsi->num_q_vectors - v_idx); + q_base = vsi->num_xdp_txq - xdp_rings_rem; + + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { + struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; + + xdp_ring->q_vector = q_vector; + xdp_ring->next = q_vector->tx.tx_ring; + q_vector->tx.tx_ring = xdp_ring; + } + xdp_rings_rem -= xdp_rings_per_v; + } + + ice_for_each_rxq(vsi, q_idx) { + vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, + q_idx); + ice_tx_xsk_pool(vsi, q_idx); + } +} + /** * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP * @vsi: VSI to bring up Tx rings used by XDP * @prog: bpf program that will be assigned to VSI + * @cfg_type: create from scratch or restore the existing configuration * * Return 0 on success and negative value on error */ -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - int xdp_rings_rem = vsi->num_xdp_txq; struct ice_pf *pf = vsi->back; struct ice_qs_cfg xdp_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, @@ -2730,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .mapping_mode = ICE_VSI_MAP_CONTIG }; struct device *dev; - int i, v_idx; - int status; + int status, i; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -2750,49 +2804,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) if (ice_xdp_alloc_setup_rings(vsi)) goto clear_xdp_rings; - /* follow the logic from ice_vsi_map_rings_to_vectors */ - ice_for_each_q_vector(vsi, v_idx) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; - int xdp_rings_per_v, q_id, q_base; - - xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, - vsi->num_q_vectors - v_idx); - q_base = vsi->num_xdp_txq - xdp_rings_rem; - - for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { - struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; - - xdp_ring->q_vector = q_vector; - xdp_ring->next = q_vector->tx.tx_ring; - q_vector->tx.tx_ring = xdp_ring; - } - xdp_rings_rem -= xdp_rings_per_v; - } - - ice_for_each_rxq(vsi, i) { - if (static_key_enabled(&ice_xdp_locking_key)) { - vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; - } else { - struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; - struct ice_tx_ring *ring; - - ice_for_each_tx_ring(ring, q_vector->tx) { - if (ice_ring_is_xdp(ring)) { - vsi->rx_rings[i]->xdp_ring = ring; - break; - } - } - } - ice_tx_xsk_pool(vsi, i); - } - /* omit the scheduler update if in reset path; XDP queues will be * taken into account at the end of ice_vsi_rebuild, where * ice_cfg_vsi_lan is being called */ - if (ice_is_reset_in_progress(pf->state)) + if (cfg_type == ICE_XDP_CFG_PART) return 0; + ice_map_xdp_rings(vsi); + /* tell the Tx scheduler that right now we have * additional queues */ @@ -2842,22 +2862,21 @@ err_map_xdp: /** * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings * @vsi: VSI to remove XDP rings + * @cfg_type: disable XDP permanently or allow it to be restored later * * Detach XDP rings from 
irq vectors, clean up the PF bitmap and free * resources */ -int ice_destroy_xdp_rings(struct ice_vsi *vsi) +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; int i, v_idx; /* q_vectors are freed in reset path so there's no point in detaching - * rings; in case of rebuild being triggered not from reset bits - * in pf->state won't be set, so additionally check first q_vector - * against NULL + * rings */ - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) goto free_qmap; ice_for_each_q_vector(vsi, v_idx) { @@ -2898,7 +2917,7 @@ free_qmap: if (static_key_enabled(&ice_xdp_locking_key)) static_branch_dec(&ice_xdp_locking_key); - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) return 0; ice_vsi_assign_bpf_prog(vsi, NULL); @@ -3009,7 +3028,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) { NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); } else { - xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, + ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); } @@ -3020,7 +3040,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_features_clear_redirect_target(vsi->netdev); - xdp_ring_err = ice_destroy_xdp_rings(vsi); + xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); /* reallocate Rx queues that were used for zero-copy */ diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 84eab92dc03c..59e8879ac059 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -374,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. + * + * Note that the Shadow RAM copy is always located after the CSS header, and + * is aligned to 64-byte (32-word) offsets. */ static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); + u32 sr_copy; + + switch (bank) { + case ICE_ACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32); + break; + case ICE_INACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32); + break; + } + + return ice_read_nvm_module(hw, bank, sr_copy + offset, data); } /** @@ -440,8 +454,7 @@ int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - u16 pfa_len, pfa_ptr; - u16 next_tlv; + u16 pfa_len, pfa_ptr, next_tlv, max_tlv; int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); @@ -454,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); return status; } + + /* The Preserved Fields Area contains a sequence of Type-Length-Value + * structures which define its contents. 
The PFA length includes all + * of the TLVs, plus the initial length word itself, *and* one final + * word at the end after all of the TLVs. + */ + if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) { + dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n", + pfa_ptr, pfa_len); + return -EINVAL; + } + /* Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. */ next_tlv = pfa_ptr + 1; - while (next_tlv < pfa_ptr + pfa_len) { + while (next_tlv < max_tlv) { u16 tlv_sub_module_type; u16 tlv_len; @@ -482,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, } return -EINVAL; } - /* Check next TLV, i.e. current TLV pointer + length + 2 words - * (for current TLV's type and length) - */ - next_tlv = next_tlv + tlv_len + 2; + + if (check_add_overflow(next_tlv, 2, &next_tlv) || + check_add_overflow(next_tlv, tlv_len, &next_tlv)) { + dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n", + tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len); + return -EINVAL; + } } /* Module does not exist */ return -ENOENT; @@ -1010,6 +1038,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw) } /** + * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @hdr_len: storage for header length in words + * + * Read the CSS header length from the NVM CSS header and add the Authentication + * header size, and then convert to words. + * + * Return: zero on success, or a negative error code on failure. + */ +static int +ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank, + u32 *hdr_len) +{ + u16 hdr_len_l, hdr_len_h; + u32 hdr_len_dword; + int status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L, + &hdr_len_l); + if (status) + return status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H, + &hdr_len_h); + if (status) + return status; + + /* CSS header length is in DWORD, so convert to words and add + * authentication header size + */ + hdr_len_dword = hdr_len_h << 16 | hdr_len_l; + *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN; + + return 0; +} + +/** + * ice_determine_css_hdr_len - Discover CSS header length for the device + * @hw: pointer to the HW struct + * + * Determine the size of the CSS header at the start of the NVM module. This + * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is + * always located just after the CSS header. + * + * Return: zero on success, or a negative error code on failure. 
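The two check_add_overflow() calls above guard the u16 cursor arithmetic while walking the PFA TLV list: a corrupted pointer or length field could otherwise wrap the 16-bit offset and send the loop over bogus data. A small userspace sketch of the same guard, using the compiler's __builtin_add_overflow directly in place of the kernel helper; the sample values are arbitrary:

/* Sketch only: the 16-bit wrap the PFA walk now rejects. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool next_tlv_offset(uint16_t cur, uint16_t tlv_len, uint16_t *next)
{
	uint16_t tmp;

	/* skip the 2 header words (type + length), then the value words */
	if (__builtin_add_overflow(cur, (uint16_t)2, &tmp) ||
	    __builtin_add_overflow(tmp, tlv_len, next))
		return false;	/* 16-bit offset would wrap: treat as corrupt */
	return true;
}

int main(void)
{
	uint16_t next;

	printf("sane TLV:    %s\n",
	       next_tlv_offset(0x0100, 0x0020, &next) ? "ok" : "overflow");
	printf("corrupt TLV: %s\n",
	       next_tlv_offset(0xfff0, 0xffff, &next) ? "ok" : "overflow");
	return 0;
}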
+ */ +static int ice_determine_css_hdr_len(struct ice_hw *hw) +{ + struct ice_bank_info *banks = &hw->flash.banks; + int status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK, + &banks->active_css_hdr_len); + if (status) + return status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK, + &banks->inactive_css_hdr_len); + if (status) + return status; + + return 0; +} + +/** * ice_init_nvm - initializes NVM setting * @hw: pointer to the HW struct * @@ -1055,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw) return status; } + status = ice_determine_css_hdr_len(hw); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n"); + return status; + } + status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index f0796a93f428..eef397e5baa0 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -482,6 +482,8 @@ struct ice_bank_info { u32 orom_size; /* Size of OROM bank */ u32 netlist_ptr; /* Pointer to 1st Netlist bank */ u32 netlist_size; /* Size of Netlist bank */ + u32 active_css_hdr_len; /* Active CSS header length */ + u32 inactive_css_hdr_len; /* Inactive CSS header length */ enum ice_flash_bank nvm_bank; /* Active NVM bank */ enum ice_flash_bank orom_bank; /* Active OROM bank */ enum ice_flash_bank netlist_bank; /* Active Netlist bank */ @@ -1087,17 +1089,13 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 /* CSS Header words */ +#define ICE_NVM_CSS_HDR_LEN_L 0x02 +#define ICE_NVM_CSS_HDR_LEN_H 0x03 #define ICE_NVM_CSS_SREV_L 0x14 #define ICE_NVM_CSS_SREV_H 0x15 -/* Length of CSS header section in words */ -#define ICE_CSS_HEADER_LENGTH 330 - -/* Offset of Shadow RAM copy in the NVM bank area. */ -#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32) - -/* Size in bytes of Option ROM trailer */ -#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH) +/* Length of Authentication header section in words */ +#define ICE_NVM_AUTH_HEADER_LEN 0x08 /* The Link Topology Netlist section is stored as a series of words. 
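To spell out the conversion in ice_get_nvm_css_hdr_len() above: the CSS header length is stored as a dword count split across two 16-bit words (ICE_NVM_CSS_HDR_LEN_L/_H), so the driver doubles it to get words and adds the 8-word authentication header. A hedged sketch; the register values are invented, chosen so the result reproduces the 330-word figure the removed define hardcoded:

/* Sketch only: the word-length computation from ice_get_nvm_css_hdr_len(). */
#include <stdio.h>
#include <stdint.h>

#define ICE_NVM_AUTH_HEADER_LEN 0x08	/* words, as in ice_type.h */

int main(void)
{
	uint16_t hdr_len_l = 0x00a1;	/* invented low word of the dword count */
	uint16_t hdr_len_h = 0x0000;	/* invented high word */

	uint32_t hdr_len_dword = ((uint32_t)hdr_len_h << 16) | hdr_len_l;
	uint32_t hdr_len_words = hdr_len_dword * 2 + ICE_NVM_AUTH_HEADER_LEN;

	printf("CSS header: %u dwords -> %u words\n",
	       (unsigned)hdr_len_dword, (unsigned)hdr_len_words);
	return 0;	/* 0xa1 = 161 dwords -> 330 words */
}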
It is * stored in the NVM as a TLV, with the first two words containing the type diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 7541f223bf4f..a65955eb23c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) if (!pool) return -EINVAL; - clear_bit(qid, vsi->af_xdp_zc_qps); xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR); return 0; @@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) if (err) return err; - set_bit(qid, vsi->af_xdp_zc_qps); - return 0; } @@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) { struct ice_rx_ring *rx_ring; - unsigned long q; + uint i; + + ice_for_each_rxq(vsi, i) { + rx_ring = vsi->rx_rings[i]; + if (!rx_ring->xsk_pool) + continue; - for_each_set_bit(q, vsi->af_xdp_zc_qps, - max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) { - rx_ring = vsi->rx_rings[q]; if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) return -ENOMEM; } diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index f2c4f1966bb0..0cd2bd695db1 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev, struct igc_hw *hw = &adapter->hw; u32 eeer; + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + edata->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + edata->supported); + if (hw->dev_spec._base.eee_enable) mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert); - *edata = adapter->eee; - eeer = rd32(IGC_EEER); /* EEE status on negotiated link */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 12f004f46082..87b655b839c1 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -12,6 +12,7 @@ #include <linux/bpf_trace.h> #include <net/xdp_sock_drv.h> #include <linux/pci.h> +#include <linux/mdio.h> #include <net/ipv6.h> @@ -4975,6 +4976,9 @@ void igc_up(struct igc_adapter *adapter) /* start the watchdog. 
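The ice_realloc_zc_buf() change that follows drops the af_xdp_zc_qps bitmap entirely: a ring with zero-copy enabled already carries an XSK pool pointer, so walking every Rx queue and skipping rings without a pool is equivalent and removes separately maintained state. A minimal hedged sketch of that pattern; the ring struct is a stand-in, not the driver's:

/* Sketch only: "walk all rings, act on those with an XSK pool". */
#include <stdio.h>
#include <stddef.h>

struct rx_ring {
	void *xsk_pool;		/* non-NULL when zero-copy is enabled */
	int id;
};

int main(void)
{
	struct rx_ring rings[4] = {
		{ .id = 0 }, { .id = 1, .xsk_pool = (void *)1 },
		{ .id = 2 }, { .id = 3, .xsk_pool = (void *)1 },
	};

	for (size_t i = 0; i < 4; i++) {
		if (!rings[i].xsk_pool)
			continue;	/* no pool: nothing to reallocate */
		printf("realloc zero-copy buffers for ring %d\n", rings[i].id);
	}
	return 0;
}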
*/ hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); + + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T | + MDIO_EEE_2_5GT; } /** @@ -7028,6 +7032,8 @@ static int igc_probe(struct pci_dev *pdev, device_set_wakeup_enable(&adapter->pdev->dev, adapter->flags & IGC_FLAG_WOL_SUPPORTED); + igc_ptp_init(adapter); + igc_tsn_clear_schedule(adapter); /* reset the hardware with the new settings */ @@ -7049,9 +7055,6 @@ static int igc_probe(struct pci_dev *pdev, /* Check if Media Autosense is enabled */ adapter->ei = *ei; - /* do hw tstamp init after resetting */ - igc_ptp_init(adapter); - /* print pcie link status and MAC address */ pcie_print_link_status(pdev); netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index e8b73b9d75e3..97722ce8c4cb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, * - when available free entries are less. * Lower priority ones out of avaialble free entries are always * chosen when 'high vs low' question arises. + * + * For a VF base MCAM match rule is set by its PF. And all the + * further MCAM rules installed by VF on its own are + * concatenated with the base rule set by its PF. Hence PF entries + * should be at lower priority compared to VF entries. Otherwise + * base rule is hit always and rules installed by VF will be of + * no use. Hence if the request is from PF then allocate low + * priority entries. */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + goto lprio_alloc; /* Get the search range for priority allocation request */ if (req->priority) { @@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, goto alloc; } - /* For a VF base MCAM match rule is set by its PF. And all the - * further MCAM rules installed by VF on its own are - * concatenated with the base rule set by its PF. Hence PF entries - * should be at lower priority compared to VF entries. Otherwise - * base rule is hit always and rules installed by VF will be of - * no use. Hence if the request is from PF and NOT a priority - * allocation request then allocate low priority entries. - */ - if (!(pcifunc & RVU_PFVF_FUNC_MASK)) - goto lprio_alloc; - /* Find out the search range for non-priority allocation request * * Get MCAM free entry count in middle zone. @@ -2568,6 +2567,18 @@ lprio_alloc: reverse = true; start = 0; end = mcam->bmap_entries; + /* Ensure PF requests are always at bottom and if PF requests + * for higher/lower priority entry wrt reference entry then + * honour that criteria and start search for entries from bottom + * and not in mid zone. 
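A compact way to read the PF/VF priority rule above together with the ref_entry clamping that follows: PF allocations always search the low-priority region bottom-up, and a PF request that also names a higher or lower priority relative to a reference entry only narrows that bottom-up range. A hedged sketch of the range selection; the names and enum are simplified stand-ins for the rvu_npc logic:

/* Sketch only: simplified model of the MCAM search-range choice for PF
 * requests, not the driver function itself.
 */
#include <stdio.h>
#include <stdbool.h>

enum prio { PRIO_NONE, PRIO_HIGHER, PRIO_LOWER };

struct range { int start, end; bool reverse; };

static struct range pf_search_range(int bmap_entries, enum prio prio,
				    int ref_entry)
{
	/* PF rules must sit below VF rules, so search the whole table
	 * bottom-up by default...
	 */
	struct range r = { .start = 0, .end = bmap_entries, .reverse = true };

	/* ...and only clamp the range when a reference entry constrains it */
	if (prio == PRIO_HIGHER)
		r.end = ref_entry;
	else if (prio == PRIO_LOWER)
		r.start = ref_entry;
	return r;
}

int main(void)
{
	struct range r = pf_search_range(16384, PRIO_HIGHER, 1000);

	printf("search [%d, %d) bottom-up=%d\n", r.start, r.end, r.reverse);
	return 0;
}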
+ */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_HIGHER_PRIO) + end = req->ref_entry; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_LOWER_PRIO) + start = req->ref_entry; } alloc: diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index cae46290a7ae..c84ce54a84a0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; dma_addr_t phy_ring_tail; - int cnt = MTK_QDMA_RING_SIZE; + int cnt = soc->tx.fq_dma_size; dma_addr_t dma_addr; - int i; + int i, j, len; if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) eth->scratch_ring = eth->sram_base; @@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) cnt * soc->tx.desc_size, ð->phy_scratch_ring, GFP_KERNEL); + if (unlikely(!eth->scratch_ring)) return -ENOMEM; - eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); - if (unlikely(!eth->scratch_head)) - return -ENOMEM; + phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); - dma_addr = dma_map_single(eth->dma_dev, - eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; + for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) { + len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH); + eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); - phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); + if (unlikely(!eth->scratch_head[j])) + return -ENOMEM; - for (i = 0; i < cnt; i++) { - dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE; - struct mtk_tx_dma_v2 *txd; + dma_addr = dma_map_single(eth->dma_dev, + eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); - txd = eth->scratch_ring + i * soc->tx.desc_size; - txd->txd1 = addr; - if (i < cnt - 1) - txd->txd2 = eth->phy_scratch_ring + - (i + 1) * soc->tx.desc_size; + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; - txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); - if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) - txd->txd3 |= TX_DMA_PREP_ADDR64(addr); - txd->txd4 = 0; - if (mtk_is_netsys_v2_or_greater(eth)) { - txd->txd5 = 0; - txd->txd6 = 0; - txd->txd7 = 0; - txd->txd8 = 0; + for (i = 0; i < cnt; i++) { + struct mtk_tx_dma_v2 *txd; + + txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; + txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; + if (j * MTK_FQ_DMA_LENGTH + i < cnt) + txd->txd2 = eth->phy_scratch_ring + + (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size; + + txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); + if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) + txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE); + + txd->txd4 = 0; + if (mtk_is_netsys_v2_or_greater(eth)) { + txd->txd5 = 0; + txd->txd6 = 0; + txd->txd7 = 0; + txd->txd8 = 0; + } } } @@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) ring_size = MTK_QDMA_RING_SIZE; else - ring_size = MTK_DMA_SIZE; + ring_size = soc->tx.dma_size; ring->buf = kcalloc(ring_size, sizeof(*ring->buf), GFP_KERNEL); @@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth) goto no_tx_mem; if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) { - ring->dma = eth->sram_base + ring_size * sz; - ring->phys = eth->phy_scratch_ring + ring_size * 
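The mtk_init_fq_dma() rework above stops allocating and mapping one large scratch buffer; instead it splits the forward-queue pages into chunks of MTK_FQ_DMA_LENGTH entries, gives each chunk its own scratch_head[] allocation and DMA mapping, and indexes descriptors across chunk boundaries. A hedged sketch of just the chunking arithmetic; allocation and dma_map_single() are elided, and the 4K count matches the fq_dma_size the mt7988 entry declares below:

/* Sketch only: chunk sizing as in the reworked mtk_init_fq_dma(). */
#include <stdio.h>

#define MTK_FQ_DMA_LENGTH 2048
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int cnt = 4096;		/* e.g. a 4K-entry forward queue */
	int chunks = DIV_ROUND_UP(cnt, MTK_FQ_DMA_LENGTH);

	for (int j = 0; j < chunks; j++) {
		int len = MIN(cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);

		/* each chunk gets its own scratch_head[j] and its own DMA
		 * mapping; descriptor j * MTK_FQ_DMA_LENGTH + i points into
		 * that chunk's mapping
		 */
		printf("chunk %d: %d pages\n", j, len);
	}
	return 0;
}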
(dma_addr_t)sz; + ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz; + ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz; } else { ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, &ring->phys, GFP_KERNEL); @@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { const struct mtk_reg_map *reg_map = eth->soc->reg_map; + const struct mtk_soc_data *soc = eth->soc; struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size, tx_ring_size; int i; @@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) tx_ring_size = MTK_QDMA_RING_SIZE; else - tx_ring_size = MTK_DMA_SIZE; + tx_ring_size = soc->tx.dma_size; if (rx_flag == MTK_RX_FLAGS_QDMA) { if (ring_no) @@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) rx_dma_size = MTK_HW_LRO_DMA_SIZE; } else { rx_data_len = ETH_DATA_LEN; - rx_dma_size = MTK_DMA_SIZE; + rx_dma_size = soc->rx.dma_size; } ring->frag_size = mtk_max_frag_size(rx_data_len); @@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth) mtk_rx_clean(eth, ð->rx_ring[i], false); } - kfree(eth->scratch_head); + for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) { + kfree(eth->scratch_head[i]); + eth->scratch_head[i] = NULL; + } } static bool mtk_hw_reset_check(struct mtk_eth *eth) @@ -5052,11 +5062,14 @@ static const struct mtk_soc_data mt2701_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5076,11 +5089,14 @@ static const struct mtk_soc_data mt7621_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5102,11 +5118,14 @@ static const struct mtk_soc_data mt7622_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5127,11 +5146,14 @@ static const struct mtk_soc_data mt7623_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5150,11 +5172,14 @@ static const struct mtk_soc_data mt7629_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), 
.irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5176,6 +5201,8 @@ static const struct mtk_soc_data mt7981_data = { .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), @@ -5183,6 +5210,7 @@ static const struct mtk_soc_data mt7981_data = { .dma_l4_valid = RX_DMA_L4_VALID_V2, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5202,6 +5230,8 @@ static const struct mtk_soc_data mt7986_data = { .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), @@ -5209,6 +5239,7 @@ static const struct mtk_soc_data mt7986_data = { .dma_l4_valid = RX_DMA_L4_VALID_V2, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5228,6 +5259,8 @@ static const struct mtk_soc_data mt7988_data = { .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(4K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma_v2), @@ -5235,6 +5268,7 @@ static const struct mtk_soc_data mt7988_data = { .dma_l4_valid = RX_DMA_L4_VALID_V2, .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5249,6 +5283,7 @@ static const struct mtk_soc_data rt5350_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), @@ -5256,6 +5291,7 @@ static const struct mtk_soc_data rt5350_data = { .dma_l4_valid = RX_DMA_L4_VALID_PDMA, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 4eab30b44070..f5174f6cb1bb 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -32,7 +32,9 @@ #define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_TX_DMA_BUF_LEN_V2 0xffff #define MTK_QDMA_RING_SIZE 2048 -#define MTK_DMA_SIZE 512 +#define MTK_DMA_SIZE(x) (SZ_##x) +#define MTK_FQ_DMA_HEAD 32 +#define MTK_FQ_DMA_LENGTH 2048 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) #define MTK_DMA_DUMMY_DESC 0xffffffff @@ -1176,6 +1178,8 @@ struct mtk_soc_data { u32 desc_size; u32 dma_max_len; u32 dma_len_offset; + u32 dma_size; + u32 fq_dma_size; } tx; struct { u32 desc_size; @@ -1183,6 +1187,7 @@ struct mtk_soc_data { u32 dma_l4_valid; u32 dma_max_len; u32 dma_len_offset; + u32 dma_size; } rx; }; @@ -1264,7 +1269,7 @@ struct mtk_eth { struct napi_struct rx_napi; void *scratch_ring; dma_addr_t phy_scratch_ring; - void *scratch_head; + void *scratch_head[MTK_FQ_DMA_HEAD]; struct clk *clks[MTK_CLK_MAX]; struct mii_bus *mii_bus; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c53c99dde558..a605eae56685 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4875,7 +4875,7 @@ static 
netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, /* Verify if UDP port is being offloaded by HW */ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) - return features; + return vxlan_features_check(skb, features); #if IS_ENABLED(CONFIG_GENEVE) /* Support Geneve offload for default UDP port */ @@ -4901,7 +4901,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct mlx5e_priv *priv = netdev_priv(netdev); features = vlan_features_check(skb, features); - features = vxlan_features_check(skb, features); /* Validate if the tunneled packet is being offloaded by HW */ if (skb->encapsulation && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 2d95a9b7b44e..b61b7d966114 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) do { if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n"); + return -EACCES; + } cond_resched(); } while (!time_after(jiffies, end)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index ad38e31822df..a6329ca2d9bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -248,6 +248,10 @@ recover_from_sw_reset: do { if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n"); + goto unlock; + } msleep(20); } while (!time_after(jiffies, end)); @@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev) mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n"); return -ENODEV; } + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n"); + return -EACCES; + } msleep(100); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index c16b462ddedf..ab2717012b79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, &dest, 1); if (IS_ERR(lag_definer->rules[idx])) { err = PTR_ERR(lag_definer->rules[idx]); - while (i--) - while (j--) + do { + while (j--) { + idx = i * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); + } + j = ldev->buckets; + } while (i--); goto destroy_fg; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c index 6b774e0c2766..d0b595ba6110 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c @@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev) ret = -EBUSY; goto pci_unlock; } + if (pci_channel_offline(dev->pdev)) { + ret = -EACCES; + goto pci_unlock; + } /* Check if semaphore is already locked */ ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 6574c145dc1e..459a836a5d9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) if (!err) mlx5_function_disable(dev, boot); + else + mlx5_stop_health_poll(dev, boot); + return err; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 24870da3f484..1934e9d6d9e4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) if (ret) return ret; - if (qcq->napi.poll) - napi_enable(&qcq->napi); - if (qcq->flags & IONIC_QCQ_F_INTR) { + napi_enable(&qcq->napi); irq_set_affinity_hint(qcq->intr.vector, &qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 5dba6d2d633c..2427610f4306 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); goto out_xdp_abort; } + buf_info->page = NULL; stats->xdp_tx++; /* the Tx completion will free the buffers */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e254b21fdb59..65d7370b47d5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -93,6 +93,7 @@ struct ethqos_emac_driver_data { bool has_emac_ge_3; const char *link_clk_name; bool has_integrated_pcs; + u32 dma_addr_width; struct dwmac4_addrs dwmac4_addrs; }; @@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .has_emac_ge_3 = true, .link_clk_name = "phyaux", .has_integrated_pcs = true, + .dma_addr_width = 36, .dwmac4_addrs = { .dma_chan = 0x00008100, .dma_chan_offset = 0x1000, @@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI; if (data->has_integrated_pcs) plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS; + if (data->dma_addr_width) + plat_dat->host_dma_width = data->dma_addr_width; if (ethqos->serdes_phy) { plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 222540b55480..1562fbdd0a04 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv, struct tc_cbs_qopt_offload *qopt) { u32 tx_queues_count = priv->plat->tx_queues_to_use; + s64 port_transmit_rate_kbps; u32 queue = qopt->queue; - u32 ptr, speed_div; u32 mode_to_use; u64 value; + u32 ptr; int ret; /* Queue 0 is not AVB capable */ @@ -355,30 +356,26 @@ static int tc_setup_cbs(struct stmmac_priv *priv, if (!priv->dma_cap.av) return -EOPNOTSUPP; + port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope; + /* Port Transmit Rate and Speed Divider */ - switch (priv->speed) { + switch (div_s64(port_transmit_rate_kbps, 1000)) { case SPEED_10000: - ptr = 32; - speed_div = 10000000; - break; case SPEED_5000: ptr = 32; - speed_div = 5000000; break; case SPEED_2500: - ptr = 8; - speed_div = 2500000; - break; case SPEED_1000: ptr = 8; - speed_div = 1000000; break; 
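For the credit-based shaper math in tc_setup_cbs() above: the port transmit rate is no longer taken from the current link speed but recovered from the offload request itself as idleSlope - sendSlope in kbit/s, and that same value replaces speed_div in the credit computation that follows. A small worked sketch; the slope values are made-up but shaped like a typical 802.1Qav request on a 1 Gb/s port:

/* Sketch only: the tc_setup_cbs() arithmetic with example slopes. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t idleslope = 200000;	/* kbit/s, made-up example */
	int64_t sendslope = -800000;	/* kbit/s, idleslope minus port rate */
	int64_t rate_kbps = idleslope - sendslope;	/* 1,000,000 => 1G port */

	/* speed divider picked from the recovered rate: 8 for 1G/2.5G ports */
	int64_t ptr = 8;

	int64_t idle_credit = idleslope * 1024 * ptr / rate_kbps;
	int64_t send_credit = -sendslope * 1024 * ptr / rate_kbps;

	printf("rate %lld kbit/s -> idle_slope %lld, send_slope %lld\n",
	       (long long)rate_kbps, (long long)idle_credit,
	       (long long)send_credit);
	return 0;	/* 1638 and 6553 for these numbers */
}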
case SPEED_100: ptr = 4; - speed_div = 100000; break; default: - return -EOPNOTSUPP; + netdev_err(priv->dev, + "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n", + port_transmit_rate_kbps); + return -EINVAL; } mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; @@ -398,10 +395,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv, } /* Final adjustments for HW */ - value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); + value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps); priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); - value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); + value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps); priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); value = qopt->hicredit * 1024ll * 8; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 51495cb4b9be..838e85ddec67 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -815,6 +815,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { + bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); const struct ip_tunnel_key *key = &info->key; @@ -826,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb)) + if (!skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs4) @@ -908,7 +909,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, } err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr), - geneve->cfg.inner_proto_inherit); + inner_proto_inherit); if (unlikely(err)) return err; @@ -925,6 +926,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { + bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); const struct ip_tunnel_key *key = &info->key; @@ -935,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb)) + if (!skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs6) @@ -997,7 +999,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? : ip6_dst_hoplimit(dst); } err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr), - geneve->cfg.inner_proto_inherit); + inner_proto_inherit); if (unlikely(err)) return err; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index c22897bf5509..017a6102be0a 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -324,7 +324,8 @@ static int nsim_get_iflink(const struct net_device *dev) rcu_read_lock(); peer = rcu_dereference(nsim->peer); - iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0; + iflink = peer ? 
READ_ONCE(peer->netdev->ifindex) : + READ_ONCE(dev->ifindex); rcu_read_unlock(); return iflink; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 2b8f8b7f1517..5aada7cf3da7 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -866,6 +866,17 @@ static int ksz8061_config_init(struct phy_device *phydev) { int ret; + /* Chip can be powered down by the bootstrap code. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (ret & BMCR_PDOWN) { + ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN); + if (ret < 0) + return ret; + usleep_range(1000, 2000); + } + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); if (ret) return ret; @@ -1939,7 +1950,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = { {0x1c, 0x20, 0xeeee}, }; -static int ksz9477_config_init(struct phy_device *phydev) +static int ksz9477_phy_errata(struct phy_device *phydev) { int err; int i; @@ -1967,16 +1978,30 @@ static int ksz9477_config_init(struct phy_device *phydev) return err; } + err = genphy_restart_aneg(phydev); + if (err) + return err; + + return err; +} + +static int ksz9477_config_init(struct phy_device *phydev) +{ + int err; + + /* Only KSZ9897 family of switches needs this fix. */ + if ((phydev->phy_id & 0xf) == 1) { + err = ksz9477_phy_errata(phydev); + if (err) + return err; + } + /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes * in this switch shall be regarded as broken. */ if (phydev->dev_flags & MICREL_NO_EEE) phydev->eee_broken_modes = -1; - err = genphy_restart_aneg(phydev); - if (err) - return err; - return kszphy_config_init(phydev); } @@ -2085,6 +2110,71 @@ static int kszphy_resume(struct phy_device *phydev) return 0; } +static int ksz9477_resume(struct phy_device *phydev) +{ + int ret; + + /* No need to initialize registers if not powered down. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (!(ret & BMCR_PDOWN)) + return 0; + + genphy_resume(phydev); + + /* After switching from power-down to normal mode, an internal global + * reset is automatically generated. Wait a minimum of 1 ms before + * read/write access to the PHY registers. + */ + usleep_range(1000, 2000); + + /* Only KSZ9897 family of switches needs this fix. */ + if ((phydev->phy_id & 0xf) == 1) { + ret = ksz9477_phy_errata(phydev); + if (ret) + return ret; + } + + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } + + return 0; +} + +static int ksz8061_resume(struct phy_device *phydev) +{ + int ret; + + /* This function can be called twice when the Ethernet device is on. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (!(ret & BMCR_PDOWN)) + return 0; + + genphy_resume(phydev); + usleep_range(1000, 2000); + + /* Re-program the value after chip is reset. 
*/ + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); + if (ret) + return ret; + + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } + + return 0; +} + static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; @@ -5339,7 +5429,7 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, .suspend = kszphy_suspend, - .resume = kszphy_resume, + .resume = ksz8061_resume, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, @@ -5493,7 +5583,7 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, .suspend = genphy_suspend, - .resume = genphy_resume, + .resume = ksz9477_resume, .get_features = ksz9477_get_features, } }; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 3f9cbd797fd6..a5684ef5884b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2429,8 +2429,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) /* Handle remove event globally, it resets this state machine */ if (event == SFP_E_REMOVE) { - if (sfp->sm_mod_state > SFP_MOD_PROBE) - sfp_sm_mod_remove(sfp); + sfp_sm_mod_remove(sfp); sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0); return; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4a802c0ea2cb..61a57d134544 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2686,6 +2686,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd { struct scatterlist *sgs[5], hdr, stat; u32 out_num = 0, tmp, in_num = 0; + bool ok; int ret; /* Caller should know better */ @@ -2731,8 +2732,9 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd } unlock: + ok = vi->ctrl->status == VIRTIO_NET_OK; mutex_unlock(&vi->cvq_lock); - return vi->ctrl->status == VIRTIO_NET_OK; + return ok; } static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, @@ -4257,7 +4259,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL; bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; struct scatterlist sgs_rx; - int ret = 0; int i; if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) @@ -4267,27 +4268,27 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) return -EINVAL; - /* Acquire all queues dim_locks */ - for (i = 0; i < vi->max_queue_pairs; i++) - mutex_lock(&vi->rq[i].dim_lock); - if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { vi->rx_dim_enabled = true; - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { + mutex_lock(&vi->rq[i].dim_lock); vi->rq[i].dim_enabled = true; - goto unlock; + mutex_unlock(&vi->rq[i].dim_lock); + } + return 0; } coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL); - if (!coal_rx) { - ret = -ENOMEM; - goto unlock; - } + if (!coal_rx) + return -ENOMEM; if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { vi->rx_dim_enabled = false; - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { + mutex_lock(&vi->rq[i].dim_lock); vi->rq[i].dim_enabled = false; + mutex_unlock(&vi->rq[i].dim_lock); + } } /* Since the per-queue coalescing params can be set, @@ 
-4300,22 +4301,19 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, - &sgs_rx)) { - ret = -EINVAL; - goto unlock; - } + &sgs_rx)) + return -EINVAL; vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; for (i = 0; i < vi->max_queue_pairs; i++) { + mutex_lock(&vi->rq[i].dim_lock); vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; - } -unlock: - for (i = vi->max_queue_pairs - 1; i >= 0; i--) mutex_unlock(&vi->rq[i].dim_lock); + } - return ret; + return 0; } static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, @@ -4417,9 +4415,9 @@ static void virtnet_rx_dim_work(struct work_struct *work) if (err) pr_debug("%s: Failed to send dim parameters on rxq%d\n", dev->name, qnum); - dim->state = DIM_START_MEASURE; } out: + dim->state = DIM_START_MEASURE; mutex_unlock(&rq->dim_lock); } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 89ca6e75fcc6..63822d454c00 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) rq->data_ring.base, rq->data_ring.basePA); rq->data_ring.base = NULL; - rq->data_ring.desc_size = 0; } + rq->data_ring.desc_size = 0; } } diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index f78dd0438843..567cb3faab70 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev, struct vxlan_fdb *f; u32 ifindex = 0; + /* Ignore packets from invalid src-address */ + if (!is_valid_ether_addr(src_mac)) + return true; + #if IS_ENABLED(CONFIG_IPV6) if (src_ip->sa.sa_family == AF_INET6 && (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)) @@ -1616,10 +1620,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) return false; - /* Ignore packets from invalid src-address */ - if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) - return false; - /* Get address from the outer IP header */ if (vxlan_get_sk_family(vs) == AF_INET) { saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index e6ea884cafc1..4f385f4a8cef 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -45,6 +45,7 @@ config ATH10K_SNOC depends on ATH10K depends on ARCH_QCOM || COMPILE_TEST depends on QCOM_SMEM + depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n select QCOM_SCM select QCOM_QMI_HELPERS help diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 3cc817a3b4a4..b82e8fb28541 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -604,7 +604,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .coldboot_cal_ftm = true, .cbcal_restart_fw = false, .fw_mem_mode = 0, - .num_vdevs = 16 + 1, + .num_vdevs = 3, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 4f62e38ba48b..9b96dbb21d83 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ 
b/drivers/net/wireless/ath/ath11k/mac.c @@ -7988,8 +7988,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; - struct cur_regulatory_info *reg_info; - enum ieee80211_ap_reg_power power_type; mutex_lock(&ar->conf_mutex); @@ -8000,17 +7998,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, if (ath11k_wmi_supports_6ghz_cc_ext(ar) && ctx->def.chan->band == NL80211_BAND_6GHZ && arvif->vdev_type == WMI_VDEV_TYPE_STA) { - reg_info = &ab->reg_info_store[ar->pdev_idx]; - power_type = vif->bss_conf.power_type; - - ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type); - - if (power_type == IEEE80211_REG_UNSET_AP) { - ret = -EINVAL; - goto out; - } - - ath11k_reg_handle_chan_list(ab, reg_info, power_type); arvif->chanctx = *ctx; ath11k_mac_parse_tx_pwr_env(ar, vif, ctx); } @@ -9626,6 +9613,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + enum ieee80211_ap_reg_power power_type; + struct cur_regulatory_info *reg_info; struct ath11k_peer *peer; int ret = 0; @@ -9705,6 +9694,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", sta->addr, arvif->vdev_id, ret); } + + if (!ret && + ath11k_wmi_supports_6ghz_cc_ext(ar) && + arvif->vdev_type == WMI_VDEV_TYPE_STA && + arvif->chanctx.def.chan && + arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) { + reg_info = &ar->ab->reg_info_store[ar->pdev_idx]; + power_type = vif->bss_conf.power_type; + + if (power_type == IEEE80211_REG_UNSET_AP) { + ath11k_warn(ar->ab, "invalid power type %d\n", + power_type); + ret = -EINVAL; + } else { + ret = ath11k_reg_handle_chan_list(ar->ab, + reg_info, + power_type); + if (ret) + ath11k_warn(ar->ab, + "failed to handle chan list with power type %d\n", + power_type); + } + } } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { spin_lock_bh(&ar->ab->base_lock); diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index 79eb3f9c902f..debe7c5919ef 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -561,6 +561,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) { int i, j, n, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0; + struct ath11k_ext_irq_grp *irq_grp; unsigned long irq_flags; ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors, @@ -574,14 +575,16 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) irq_flags |= IRQF_NOBALANCING; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; irq_grp->ab = ab; irq_grp->grp_id = i; irq_grp->napi_ndev = alloc_netdev_dummy(0); - if (!irq_grp->napi_ndev) - return -ENOMEM; + if (!irq_grp->napi_ndev) { + ret = -ENOMEM; + goto fail_allocate; + } netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath11k_pcic_ext_grp_napi_poll); @@ -606,11 +609,8 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) int irq = ath11k_pcic_get_msi_irq(ab, vector); if (irq < 0) { - for (n = 0; n <= i; n++) { - irq_grp = &ab->ext_irq_grp[n]; - free_netdev(irq_grp->napi_ndev); - } - return irq; + ret = irq; + goto fail_irq; } ab->irq_num[irq_idx] = irq; @@ -635,6 
+635,15 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) } return 0; +fail_irq: + /* i ->napi_ndev was properly allocated. Free it also */ + i += 1; +fail_allocate: + for (n = 0; n < i; n++) { + irq_grp = &ab->ext_irq_grp[n]; + free_netdev(irq_grp->napi_ndev); + } + return ret; } int ath11k_pcic_config_irq(struct ath11k_base *ab) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 33654f228ee8..d156a9c64194 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1815,8 +1815,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_drv); - iwl_dbg_tlv_free(drv->trans); #endif + iwl_dbg_tlv_free(drv->trans); kfree(drv); err: return ERR_PTR(ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 71e6b06481a9..54f4acbbd05b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, void *_data) { struct wowlan_key_gtk_type_iter *data = _data; + __le32 *cipher = NULL; + + if (key->keyidx == 4 || key->keyidx == 5) + cipher = &data->kek_kck_cmd->igtk_cipher; + if (key->keyidx == 6 || key->keyidx == 7) + cipher = &data->kek_kck_cmd->bigtk_cipher; switch (key->cipher) { default: @@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + if (cipher) + *cipher = cpu_to_le32(STA_KEY_FLG_GCMP); return; case WLAN_CIPHER_SUITE_AES_CMAC: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + if (cipher) + *cipher = cpu_to_le32(STA_KEY_FLG_CCM); return; case WLAN_CIPHER_SUITE_CCMP: if (!sta) @@ -2341,7 +2350,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, out: if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, - WOWLAN_GET_STATUSES, 0) < 10) { + WOWLAN_GET_STATUSES, + IWL_FW_CMD_VER_UNKNOWN) < 10) { mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ mvmvif->seqno = status->non_qos_seq_ctr + 0x10; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 79f4ac8cbc72..8101ecbb478b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1617,6 +1617,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) &beacon_cmd.tim_size, beacon->data, beacon->len); + if (iwl_fw_lookup_cmd_ver(mvm->fw, + BEACON_TEMPLATE_CMD, 0) >= 14) { + u32 offset = iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_S1G_TWT, + beacon->len); + + beacon_cmd.btwt_offset = cpu_to_le32(offset); + } + iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index e7f5978ef2d7..f4937a100cbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; - 
__le32 *dump_data = mfu_dump_notif->data; - int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); - int i; if (mfu_dump_notif->index_num == 0) IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); - - for (i = 0; i < n_words; i++) - IWL_DEBUG_INFO(mvm, - "MFUART assert dump, dword %u: 0x%08x\n", - le16_to_cpu(mfu_dump_notif->index_num) * - n_words + i, - le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, @@ -895,8 +885,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) int ret; u16 len = 0; u32 n_subbands; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3); + if (cmd_ver >= 7) { len = sizeof(cmd.v7); n_subbands = IWL_NUM_SUB_BANDS_V2; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 5a06f887769a..5144fa0f96b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -873,7 +873,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, } } -static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) +u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) { struct ieee80211_mgmt *mgmt = (void *)beacon; const u8 *ie; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 486a6b8f3c97..de9f0b446545 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1128,6 +1128,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); } +static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = data; + struct iwl_mvm_sta *mvm_sta; + struct ieee80211_vif *vif; + int link_id; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + vif = mvm_sta->vif; + + if (!sta->valid_links) + return; + + for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) { + struct iwl_mvm_link_sta *mvm_link_sta; + + mvm_link_sta = + rcu_dereference_check(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (mvm_link_sta && !(vif->active_links & BIT(link_id))) { + /* + * We have a link STA but the link is inactive in + * mac80211. This will happen if we failed to + * deactivate the link but mac80211 roll back the + * deactivation of the link. + * Delete the stale data to avoid issues later on. + */ + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, + link_id, false); + } + } +} + static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { iwl_mvm_stop_device(mvm); @@ -1150,6 +1183,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) */ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); + /* cleanup stations as links may be gone after restart */ + ieee80211_iterate_stations_atomic(mvm->hw, + iwl_mvm_cleanup_sta_iterator, mvm); + mvm->p2p_device_vif = NULL; iwl_mvm_reset_phy_ctxts(mvm); @@ -6348,7 +6385,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, .len[0] = sizeof(cmd), .data[1] = data, .len[1] = size, - .flags = sync ? 0 : CMD_ASYNC, + .flags = CMD_SEND_IN_RFKILL | (sync ? 
0 : CMD_ASYNC), }; int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 0a3b7284eedd..fcfd2dd7568e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -75,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, goto out_free_bf; iwl_mvm_tcm_add_vif(mvm, vif); - INIT_DELAYED_WORK(&mvmvif->csa_work, - iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index b7a461dba41e..9d139b56e152 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return iwl_mvm_mld_send_sta_cmd(mvm, &cmd); } -static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, - struct iwl_mvm_link_sta *mvm_sta_link, - unsigned int link_id, - bool is_in_fw) +void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id, + bool is_in_fw) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], is_in_fw ? ERR_PTR(-EINVAL) : NULL); @@ -1014,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, cmd.modify.tid = cpu_to_le32(data->tid); - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL, + sizeof(cmd), &cmd); data->sta_mask = new_sta_mask; if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 1f58c727fa63..0a1959bd4079 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1758,6 +1758,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2, u64 *boottime, ktime_t *realtime); u32 iwl_mvm_get_systime(struct iwl_mvm *mvm); +u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 376b23b409dc..6cd4ec4d8f34 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -122,13 +122,8 @@ enum { #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -/* - * FIXME - various places in firmware API still use u8, - * e.g. LQ command and SCD config command. - * This should be 256 instead. 
- */ -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index d78af2928152..489cfb0a4ab1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -2450,8 +2450,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, * * We mark it as mac header, for upper layers to know where * all radio tap header ends. + * + * Since data doesn't move data while putting data on skb and that is + * the only way we use, data + len is the next place that hdr would be put */ - skb_reset_mac_header(skb); + skb_set_mac_header(skb, skb->len); /* * Override the nss from the rx_vec since the rate_n_flags has diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a7ec172eeade..b5f664ae5a17 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1313,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else @@ -1418,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) general_params->adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) general_params->adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else @@ -1730,7 +1730,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, break; } - if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) { + if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE && + !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid), + "scan: invalid BSSID at index %u, index_b=%u\n", + j, idex_b)) { memcpy(&pp->bssid_array[idex_b++], scan_6ghz_params[j].bssid, ETH_ALEN); } @@ -3319,10 +3322,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), - 0, sizeof(cmd), &cmd); + CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; + IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 20d4968d692a..cc79fe991c26 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2848,7 +2848,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, .action = start ? 
cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), }; - u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD), + .flags = CMD_SEND_IN_RFKILL, + .len[0] = sizeof(cmd), + .data[0] = &cmd, + }; int ret; BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); @@ -2860,7 +2865,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, cmd.alloc.ssn = cpu_to_le16(ssn); cmd.alloc.win_size = cpu_to_le16(buf_size); baid = -EIO; - } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { + } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) { cmd.remove_v1.baid = cpu_to_le32(baid); BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); } else { @@ -2869,8 +2874,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, cmd.remove.tid = cpu_to_le32(tid); } - ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), - &cmd, &baid); + ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 264f1f9394b6..754a05a8c189 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -662,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id, + bool is_in_fw); int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id); int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 0971c164b57e..c27acaf0eb1c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw, #endif /* CONFIG_PM */ const struct ieee80211_ops mt7615_ops = { + .add_chanctx = ieee80211_emulate_add_chanctx, + .remove_chanctx = ieee80211_emulate_remove_chanctx, + .change_chanctx = ieee80211_emulate_change_chanctx, + .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = mt7615_tx, .start = mt7615_start, .stop = mt7615_stop, diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 7d9fb9f2d527..089102ed9ae5 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -237,11 +237,12 @@ static int set_channel(struct wiphy *wiphy, struct wilc_vif *vif; u32 channelnum; int result; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return PTR_ERR(vif); } @@ -252,7 +253,7 @@ static int set_channel(struct wiphy *wiphy, if (result) netdev_err(vif->ndev, "Error in setting channel\n"); - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return result; } @@ -805,8 +806,9 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed) struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; struct wilc_priv *priv; + int srcu_idx; - rcu_read_lock(); + srcu_idx = 
srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) goto out; @@ -861,7 +863,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed) netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n"); out: - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return ret; } @@ -1537,19 +1539,20 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, if (type == NL80211_IFTYPE_MONITOR) { struct net_device *ndev; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_vif_from_type(wl, WILC_AP_MODE); if (!vif) { vif = wilc_get_vif_from_type(wl, WILC_GO_MODE); if (!vif) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); goto validate_interface; } } if (vif->monitor_flag) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); goto validate_interface; } @@ -1557,12 +1560,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, if (ndev) { vif->monitor_flag = 1; } else { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return ERR_PTR(-EINVAL); } wdev = &vif->priv.wdev; - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return wdev; } @@ -1610,7 +1613,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) list_del_rcu(&vif->list); wl->vif_num--; mutex_unlock(&wl->vif_mutex); - synchronize_rcu(); + synchronize_srcu(&wl->srcu); return 0; } @@ -1635,23 +1638,25 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return; } netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled); wilc_set_wowlan_trigger(vif, enabled); - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); } static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { int ret; + int srcu_idx; s32 tx_power = MBM_TO_DBM(mbm); struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; @@ -1659,10 +1664,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, if (!wl->initialized) return -EIO; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return -EINVAL; } @@ -1674,7 +1679,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, ret = wilc_set_tx_power(vif, tx_power); if (ret) netdev_err(vif->ndev, "Failed to set tx power\n"); - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return ret; } @@ -1757,6 +1762,7 @@ static void wlan_init_locks(struct wilc *wl) init_completion(&wl->cfg_event); init_completion(&wl->sync_event); init_completion(&wl->txq_thread_started); + init_srcu_struct(&wl->srcu); } void wlan_deinit_locks(struct wilc *wilc) @@ -1767,6 +1773,7 @@ void wlan_deinit_locks(struct wilc *wilc) mutex_destroy(&wilc->txq_add_to_head_cs); mutex_destroy(&wilc->vif_mutex); mutex_destroy(&wilc->deinit_lock); + cleanup_srcu_struct(&wilc->srcu); } int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index 919de6ffb821..f1085ccb7eed 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -1570,11 
+1570,12 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) struct host_if_drv *hif_drv; struct host_if_msg *msg; struct wilc_vif *vif; + int srcu_idx; int result; int id; id = get_unaligned_le32(&buffer[length - 4]); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1593,7 +1594,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) msg->body.net_info.rssi = buffer[8]; msg->body.net_info.mgmt = kmemdup(&buffer[9], msg->body.net_info.frame_len, - GFP_ATOMIC); + GFP_KERNEL); if (!msg->body.net_info.mgmt) { kfree(msg); goto out; @@ -1606,7 +1607,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) kfree(msg); } out: - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) @@ -1614,13 +1615,14 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) struct host_if_drv *hif_drv; struct host_if_msg *msg; struct wilc_vif *vif; + int srcu_idx; int result; int id; mutex_lock(&wilc->deinit_lock); id = get_unaligned_le32(&buffer[length - 4]); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1647,7 +1649,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) kfree(msg); } out: - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); mutex_unlock(&wilc->deinit_lock); } @@ -1655,11 +1657,12 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length) { struct host_if_drv *hif_drv; struct wilc_vif *vif; + int srcu_idx; int result; int id; id = get_unaligned_le32(&buffer[length - 4]); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1684,7 +1687,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length) } } out: - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan, diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 73f56f7b002b..710e29bea560 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -127,28 +127,30 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid, int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) { + int srcu_idx; u8 ret_val = 0; struct wilc_vif *vif; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) { if (!is_zero_ether_addr(vif->bssid)) ret_val++; } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); return ret_val; } static void wilc_wake_tx_queues(struct wilc *wl) { + int srcu_idx; struct wilc_vif *ifc; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); wilc_for_each_vif(wl, ifc) { if (ifc->mac_opened && netif_queue_stopped(ifc->ndev)) netif_wake_queue(ifc->ndev); } - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); } static int wilc_txq_task(void *vp) @@ -653,6 +655,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) struct sockaddr *addr = (struct sockaddr *)p; unsigned char mac_addr[ETH_ALEN]; struct wilc_vif *tmp_vif; + int srcu_idx; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -664,19 +667,19 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) 
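The wilc1000 hunks in this range replace rcu_read_lock()/rcu_read_unlock() around vif-list walks with SRCU, because the code inside the critical section may sleep (sending commands to the chip, registering a net device). A minimal sketch of the read-side pattern, modelled on wilc_wlan_get_num_conn_ifcs() from this diff; only the function name example_count_connected() is invented.

#include <linux/etherdevice.h>
#include <linux/srcu.h>
/* struct wilc, struct wilc_vif and wilc_for_each_vif() come from the
 * driver's netdev.h as modified in this series */

static int example_count_connected(struct wilc *wl)
{
	struct wilc_vif *vif;
	int srcu_idx, n = 0;

	/* srcu_read_lock() returns a cookie that must be passed to unlock */
	srcu_idx = srcu_read_lock(&wl->srcu);
	wilc_for_each_vif(wl, vif) {
		/* sleeping is allowed in this section, unlike classic RCU */
		if (!is_zero_ether_addr(vif->bssid))
			n++;
	}
	srcu_read_unlock(&wl->srcu, srcu_idx);

	return n;
}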
/* Verify MAC Address is not already in use: */ - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, tmp_vif) { wilc_get_mac_address(tmp_vif, mac_addr); if (ether_addr_equal(addr->sa_data, mac_addr)) { if (vif != tmp_vif) { - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); return -EADDRNOTAVAIL; } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); return 0; } } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); result = wilc_set_mac_address(vif, (u8 *)addr->sa_data); if (result) @@ -764,14 +767,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) wilc_tx_complete); if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) { + int srcu_idx; struct wilc_vif *vif; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) { if (vif->mac_opened) netif_stop_queue(vif->ndev); } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } return NETDEV_TX_OK; @@ -815,12 +819,13 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, unsigned int frame_len = 0; struct wilc_vif *vif; struct sk_buff *skb; + int srcu_idx; int stats; if (!wilc) return; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_netdev = get_if_handler(wilc, buff); if (!wilc_netdev) goto out; @@ -848,14 +853,15 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats); } out: - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth) { + int srcu_idx; struct wilc_vif *vif; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff; u16 type = le16_to_cpup((__le16 *)buff); @@ -876,7 +882,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth) if (vif->monitor_flag) wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size); } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } static const struct net_device_ops wilc_netdev_ops = { @@ -906,7 +912,7 @@ void wilc_netdev_cleanup(struct wilc *wilc) list_del_rcu(&vif->list); wilc->vif_num--; mutex_unlock(&wilc->vif_mutex); - synchronize_rcu(); + synchronize_srcu(&wilc->srcu); if (vif->ndev) unregister_netdev(vif->ndev); } @@ -925,15 +931,16 @@ static u8 wilc_get_available_idx(struct wilc *wl) { int idx = 0; struct wilc_vif *vif; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); wilc_for_each_vif(wl, vif) { if (vif->idx == 0) idx = 1; else idx = 0; } - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return idx; } @@ -983,7 +990,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, list_add_tail_rcu(&vif->list, &wl->vif_list); wl->vif_num += 1; mutex_unlock(&wl->vif_mutex); - synchronize_rcu(); + synchronize_srcu(&wl->srcu); return vif; diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index eecee3973d6a..fde8610a9c84 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -32,8 +32,8 @@ #define wilc_for_each_vif(w, v) \ struct wilc *_w = w; \ - list_for_each_entry_rcu(v, &_w->vif_list, list, \ - rcu_read_lock_held()) + list_for_each_entry_srcu(v, &_w->vif_list, list, \ + srcu_read_lock_held(&_w->srcu)) struct wilc_wfi_stats { unsigned long rx_packets; @@ -220,6 +220,14 @@ struct wilc { /* 
protect vif list */ struct mutex vif_mutex; + /* Sleepable RCU struct to manipulate vif list. Sleepable version is + * needed over the classic RCU version because the driver's current + * design involves some sleeping code while manipulating a vif + * retrieved from vif list (so in a SRCU critical section), like: + * - sending commands to the chip, using info from retrieved vif + * - registering a new monitoring net device + */ + struct srcu_struct srcu; u8 open_ifcs; /* protect head of transmit queue */ diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 37c32d17856e..a9e872a7b2c3 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -712,6 +712,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) u32 *vmm_table = wilc->vmm_table; u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0}; const struct wilc_hif_func *func; + int srcu_idx; u8 *txb = wilc->tx_buffer; struct wilc_vif *vif; @@ -723,10 +724,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) mutex_lock(&wilc->txq_add_to_head_cs); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev); - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); for (ac = 0; ac < NQUEUES; ac++) tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 2e60a6991ca1..42b7db12b1bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { - rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, - "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n", - hw->conf.long_frame_max_tx_count); - /* brought up everything changes (changed == ~0) indicates first - * open, so use our default value instead of that of wiphy. 
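On the writer side, the same conversion keeps vif_mutex around the list updates but waits with synchronize_srcu() instead of synchronize_rcu() before the removed vif is torn down, and the srcu_struct is initialised and cleaned up alongside the other locks. A condensed sketch of that removal path, following del_virtual_intf()/wilc_netdev_cleanup() above; only the wrapper name is invented.

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/srcu.h>

static void example_remove_vif(struct wilc *wl, struct wilc_vif *vif)
{
	/* writers still serialize against each other with vif_mutex */
	mutex_lock(&wl->vif_mutex);
	list_del_rcu(&vif->list);
	wl->vif_num--;
	mutex_unlock(&wl->vif_mutex);

	/*
	 * Wait for every SRCU reader that may still see this vif before it
	 * is unregistered or freed. The srcu_struct itself is set up in
	 * wlan_init_locks() and destroyed in wlan_deinit_locks().
	 */
	synchronize_srcu(&wl->srcu);
}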
- */ - if (changed != ~0) { - mac->retry_long = hw->conf.long_frame_max_tx_count; - mac->retry_short = hw->conf.long_frame_max_tx_count; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, - (u8 *)(&hw->conf.long_frame_max_tx_count)); - } - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL && !rtlpriv->proximity.proxim_on) { struct ieee80211_channel *channel = hw->conf.chandef.chan; diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c index bef6819986e9..33d6342124bc 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -211,7 +211,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink) rc = PTR_ERR(devlink->cd_regions[i]); dev_err(devlink->dev, "Devlink region fail,err %d", rc); /* Delete previously created regions */ - for ( ; i >= 0; i--) + for (i--; i >= 0; i--) devlink_region_destroy(devlink->cd_regions[i]); goto region_create_fail; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f5d150c62955..782090ce0bc1 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -998,6 +998,7 @@ void nvme_cleanup_cmd(struct request *req) clear_bit_unlock(0, &ctrl->discard_page_busy); else kfree(bvec_virt(&req->special_vec)); + req->rq_flags &= ~RQF_SPECIAL_PAYLOAD; } } EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); @@ -3959,12 +3960,13 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, mutex_lock(&ctrl->namespaces_lock); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { - if (ns->head->ns_id > nsid) - list_splice_init_rcu(&ns->list, &rm_list, - synchronize_rcu); + if (ns->head->ns_id > nsid) { + list_del_rcu(&ns->list); + synchronize_srcu(&ctrl->srcu); + list_add_tail_rcu(&ns->list, &rm_list); + } } mutex_unlock(&ctrl->namespaces_lock); - synchronize_srcu(&ctrl->srcu); list_for_each_entry_safe(ns, next, &rm_list, list) nvme_ns_remove(ns); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index c6ad2148c2e0..ceb9c0ed3120 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0); + NVME_QID_ANY, NVME_SUBMIT_RESERVED); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0); + NVME_QID_ANY, NVME_SUBMIT_RESERVED); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) cmd.prop_set.value = cpu_to_le64(val); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, - NVME_QID_ANY, 0); + NVME_QID_ANY, NVME_SUBMIT_RESERVED); if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 9d9d2a127c4e..8b69427a4476 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -111,6 +111,13 @@ static struct request *nvme_alloc_user_request(struct request_queue *q, return req; } +static void nvme_unmap_bio(struct bio *bio) +{ + if (bio_integrity(bio)) + bio_integrity_unmap_free_user(bio); + blk_rq_unmap_user(bio); +} + static int nvme_map_user_request(struct request *req, u64 ubuffer, unsigned bufflen, void __user *meta_buffer, 
unsigned meta_len, u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags) @@ -157,7 +164,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, out_unmap: if (bio) - blk_rq_unmap_user(bio); + nvme_unmap_bio(bio); out: blk_mq_free_request(req); return ret; @@ -195,7 +202,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, if (result) *result = le64_to_cpu(nvme_req(req)->result.u64); if (bio) - blk_rq_unmap_user(bio); + nvme_unmap_bio(bio); blk_mq_free_request(req); if (effects) @@ -406,7 +413,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); if (pdu->bio) - blk_rq_unmap_user(pdu->bio); + nvme_unmap_bio(pdu->bio); io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags); } @@ -432,7 +439,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, */ if (blk_rq_is_poll(req)) { if (pdu->bio) - blk_rq_unmap_user(pdu->bio); + nvme_unmap_bio(pdu->bio); io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status); } else { io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index e05571b2a1b0..8fa1ffcdaed4 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -77,7 +77,7 @@ static int nvme_sc_to_pr_err(int nvme_sc) if (nvme_is_path_error(nvme_sc)) return PR_STS_PATH_FAILED; - switch (nvme_sc) { + switch (nvme_sc & 0x7ff) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 06f0c587f343..4ff460ba2826 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -957,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, req->metadata_sg_cnt = 0; req->transfer_len = 0; req->metadata_len = 0; + req->cqe->result.u64 = 0; req->cqe->status = 0; req->cqe->sq_head = 0; req->ns = NULL; diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index d61b8c6ff3b2..cb34d644ed08 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -333,7 +333,6 @@ done: pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n", __func__, ctrl->cntlid, req->sq->qid, status, req->error_loc); - req->cqe->result.u64 = 0; if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { unsigned long auth_expire_secs = ctrl->kato ? 
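The nvme ioctl hunks in this range route every user-buffer teardown through the new nvme_unmap_bio() helper, so integrity (metadata) pages mapped from user space are released via bio_integrity_unmap_free_user() before the data pages are unmapped. The resulting call-site pattern, taken directly from these hunks, is simply:

	if (bio)
		nvme_unmap_bio(bio);	/* was: blk_rq_unmap_user(bio) */
	blk_mq_free_request(req);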
ctrl->kato : 120; @@ -516,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) status = nvmet_copy_to_sgl(req, 0, d, al); kfree(d); done: - req->cqe->result.u64 = 0; - if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) nvmet_auth_sq_free(req->sq); else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 042b379cbb36..69d77d34bec1 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -226,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) if (status) goto out; - /* zero out initial completion result, assign values as needed */ - req->cqe->result.u32 = 0; - if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); @@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (status) goto out; - /* zero out initial completion result, assign values as needed */ - req->cqe->result.u32 = 0; - if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index bb4a69d538fd..f003782d4ecf 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) req->cmd->common.opcode == nvme_admin_identify) { switch (req->cmd->identify.cns) { case NVME_ID_CNS_CTRL: - nvmet_passthru_override_id_ctrl(req); + status = nvmet_passthru_override_id_ctrl(req); break; case NVME_ID_CNS_NS: - nvmet_passthru_override_id_ns(req); + status = nvmet_passthru_override_id_ns(req); break; case NVME_ID_CNS_NS_DESC_LIST: - nvmet_passthru_override_id_descs(req); + status = nvmet_passthru_override_id_descs(req); break; } } else if (status < 0) diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 174900072c18..462375b293e4 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -25,6 +25,8 @@ #include <linux/string.h> #include <linux/slab.h> +#include "of_private.h" + /** * irq_of_parse_and_map - Parse and map an interrupt into linux virq space * @dev: Device node of the device whose interrupt is to be mapped @@ -96,6 +98,57 @@ static const char * const of_irq_imap_abusers[] = { NULL, }; +const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq) +{ + u32 intsize, addrsize; + struct device_node *np; + + /* Get the interrupt parent */ + if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) + np = of_node_get(of_irq_dflt_pic); + else + np = of_find_node_by_phandle(be32_to_cpup(imap)); + imap++; + + /* Check if not found */ + if (!np) { + pr_debug(" -> imap parent not found !\n"); + return NULL; + } + + /* Get #interrupt-cells and #address-cells of new parent */ + if (of_property_read_u32(np, "#interrupt-cells", + &intsize)) { + pr_debug(" -> parent lacks #interrupt-cells!\n"); + of_node_put(np); + return NULL; + } + if (of_property_read_u32(np, "#address-cells", + &addrsize)) + addrsize = 0; + + pr_debug(" -> intsize=%d, addrsize=%d\n", + intsize, addrsize); + + /* Check for malformed properties */ + if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS) + || (len < (addrsize + intsize))) { + of_node_put(np); + return NULL; + } + + pr_debug(" -> imaplen=%d\n", len); + + imap += addrsize + intsize; + + out_irq->np = np; + for (int i = 0; i < intsize; i++) + out_irq->args[i] = be32_to_cpup(imap - intsize + i); + out_irq->args_count = intsize; + + return imap; 
+} + /** * of_irq_parse_raw - Low level interrupt tree parsing * @addr: address specifier (start of "reg" property of the device) in be32 format @@ -112,12 +165,12 @@ static const char * const of_irq_imap_abusers[] = { */ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) { - struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; + struct device_node *ipar, *tnode, *old = NULL; __be32 initial_match_array[MAX_PHANDLE_ARGS]; const __be32 *match_array = initial_match_array; - const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) }; - u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; - int imaplen, match, i, rc = -EINVAL; + const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) }; + u32 intsize = 1, addrsize; + int i, rc = -EINVAL; #ifdef DEBUG of_print_phandle_args("of_irq_parse_raw: ", out_irq); @@ -176,6 +229,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) /* Now start the actual "proper" walk of the interrupt tree */ while (ipar != NULL) { + int imaplen, match; + const __be32 *imap, *oldimap, *imask; + struct device_node *newpar; /* * Now check if cursor is an interrupt-controller and * if it is then we are done, unless there is an @@ -216,7 +272,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) /* Parse interrupt-map */ match = 0; - while (imaplen > (addrsize + intsize + 1) && !match) { + while (imaplen > (addrsize + intsize + 1)) { /* Compare specifiers */ match = 1; for (i = 0; i < (addrsize + intsize); i++, imaplen--) @@ -224,48 +280,17 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen); - /* Get the interrupt parent */ - if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) - newpar = of_node_get(of_irq_dflt_pic); - else - newpar = of_find_node_by_phandle(be32_to_cpup(imap)); - imap++; - --imaplen; - - /* Check if not found */ - if (newpar == NULL) { - pr_debug(" -> imap parent not found !\n"); - goto fail; - } - - if (!of_device_is_available(newpar)) - match = 0; - - /* Get #interrupt-cells and #address-cells of new - * parent - */ - if (of_property_read_u32(newpar, "#interrupt-cells", - &newintsize)) { - pr_debug(" -> parent lacks #interrupt-cells!\n"); - goto fail; - } - if (of_property_read_u32(newpar, "#address-cells", - &newaddrsize)) - newaddrsize = 0; - - pr_debug(" -> newintsize=%d, newaddrsize=%d\n", - newintsize, newaddrsize); - - /* Check for malformed properties */ - if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS) - || (imaplen < (newaddrsize + newintsize))) { - rc = -EFAULT; + oldimap = imap; + imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq); + if (!imap) goto fail; - } - imap += newaddrsize + newintsize; - imaplen -= newaddrsize + newintsize; + match &= of_device_is_available(out_irq->np); + if (match) + break; + of_node_put(out_irq->np); + imaplen -= imap - oldimap; pr_debug(" -> imaplen=%d\n", imaplen); } if (!match) { @@ -287,11 +312,11 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) * Successfully parsed an interrupt-map translation; copy new * interrupt specifier into the out_irq structure */ - match_array = imap - newaddrsize - newintsize; - for (i = 0; i < newintsize; i++) - out_irq->args[i] = be32_to_cpup(imap - newintsize + i); - out_irq->args_count = intsize = newintsize; - addrsize = newaddrsize; + match_array = oldimap + 1; + + newpar = out_irq->np; + intsize = 
out_irq->args_count; + addrsize = (imap - match_array) - intsize; if (ipar == newpar) { pr_debug("%pOF interrupt-map entry to self\n", ipar); @@ -300,7 +325,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) skiplevel: /* Iterate again with new parent */ - out_irq->np = newpar; pr_debug(" -> new parent: %pOF\n", newpar); of_node_put(ipar); ipar = newpar; @@ -310,7 +334,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) fail: of_node_put(ipar); - of_node_put(newpar); return rc; } diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 94fc0aa07af9..04aa2a91f851 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -159,6 +159,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np, extern int of_bus_n_addr_cells(struct device_node *np); extern int of_bus_n_size_cells(struct device_node *np); +const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, + struct of_phandle_args *out_irq); + struct bus_dma_region; #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA) int of_dma_get_range(struct device_node *np, diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c index a9301d293f01..c85a258bc6ae 100644 --- a/drivers/of/of_test.c +++ b/drivers/of/of_test.c @@ -54,4 +54,5 @@ static struct kunit_suite of_dtb_suite = { kunit_test_suites( &of_dtb_suite, ); +MODULE_DESCRIPTION("KUnit tests for OF APIs"); MODULE_LICENSE("GPL"); diff --git a/drivers/of/property.c b/drivers/of/property.c index 1c83e68f805b..164d77cb9445 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1306,10 +1306,10 @@ static struct device_node *parse_interrupts(struct device_node *np, static struct device_node *parse_interrupt_map(struct device_node *np, const char *prop_name, int index) { - const __be32 *imap, *imap_end, *addr; + const __be32 *imap, *imap_end; struct of_phandle_args sup_args; u32 addrcells, intcells; - int i, imaplen; + int imaplen; if (!IS_ENABLED(CONFIG_OF_IRQ)) return NULL; @@ -1322,33 +1322,23 @@ static struct device_node *parse_interrupt_map(struct device_node *np, addrcells = of_bus_n_addr_cells(np); imap = of_get_property(np, "interrupt-map", &imaplen); - if (!imap || imaplen <= (addrcells + intcells)) + imaplen /= sizeof(*imap); + if (!imap) return NULL; - imap_end = imap + imaplen; - while (imap < imap_end) { - addr = imap; - imap += addrcells; + imap_end = imap + imaplen; - sup_args.np = np; - sup_args.args_count = intcells; - for (i = 0; i < intcells; i++) - sup_args.args[i] = be32_to_cpu(imap[i]); - imap += intcells; + for (int i = 0; imap + addrcells + intcells + 1 < imap_end; i++) { + imap += addrcells + intcells; - /* - * Upon success, the function of_irq_parse_raw() returns - * interrupt controller DT node pointer in sup_args.np. - */ - if (of_irq_parse_raw(addr, &sup_args)) + imap = of_irq_parse_imap_parent(imap, imap_end - imap, &sup_args); + if (!imap) return NULL; - if (!index) + if (i == index) return sup_args.np; of_node_put(sup_args.np); - imap += sup_args.args_count + 1; - index--; } return NULL; diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c index e6dc857aac3f..e06c7b2aac5c 100644 --- a/drivers/parport/parport_amiga.c +++ b/drivers/parport/parport_amiga.c @@ -229,7 +229,13 @@ static void __exit amiga_parallel_remove(struct platform_device *pdev) parport_put_port(port); } -static struct platform_driver amiga_parallel_driver = { +/* + * amiga_parallel_remove() lives in .exit.text. 
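The newly shared of_irq_parse_imap_parent() above returns a pointer just past the parent specifier it consumed (NULL on error) and fills out_irq->np plus out_irq->args, taking a reference on the parent node. A condensed sketch of the caller loop, following the reworked parse_interrupt_map() in this range; the wrapper name example_nth_imap_parent() is invented, the other names mirror the hunks.

#include <linux/of.h>
/* of_irq_parse_imap_parent() is declared in drivers/of/of_private.h (above) */

static struct device_node *example_nth_imap_parent(const __be32 *imap,
						   const __be32 *imap_end,
						   u32 addrcells, u32 intcells,
						   int index)
{
	struct of_phandle_args sup;

	for (int i = 0; imap + addrcells + intcells + 1 < imap_end; i++) {
		imap += addrcells + intcells;	/* skip the child specifier */
		imap = of_irq_parse_imap_parent(imap, imap_end - imap, &sup);
		if (!imap)			/* malformed entry */
			return NULL;
		if (i == index)
			return sup.np;		/* caller must of_node_put() */
		of_node_put(sup.np);		/* not the entry we want */
	}

	return NULL;
}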
For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. + */ +static struct platform_driver amiga_parallel_driver __refdata = { .remove_new = __exit_p(amiga_parallel_remove), .driver = { .name = "amiga-parallel", diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 30f031de9cfe..b123da16b63b 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -289,8 +289,6 @@ void pci_cfg_access_lock(struct pci_dev *dev) { might_sleep(); - lock_map_acquire(&dev->cfg_access_lock); - raw_spin_lock_irq(&pci_lock); if (dev->block_cfg_access) pci_wait_cfg(dev); @@ -345,8 +343,6 @@ void pci_cfg_access_unlock(struct pci_dev *dev) raw_spin_unlock_irqrestore(&pci_lock, flags); wake_up_all(&pci_cfg_wait); - - lock_map_release(&dev->cfg_access_lock); } EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 59e0949fb079..35fb1f17a589 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4883,7 +4883,6 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) */ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { - lock_map_assert_held(&dev->cfg_access_lock); pcibios_reset_secondary_bus(dev); return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8e696e547565..5fbabb4e3425 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2546,9 +2546,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; - lockdep_register_key(&dev->cfg_access_key); - lockdep_init_map(&dev->cfg_access_lock, dev_name(&dev->dev), - &dev->cfg_access_key, 0); dma_set_max_seg_size(&dev->dev, 65536); dma_set_seg_boundary(&dev->dev, 0xffffffff); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 1953317541ea..665fa9524986 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -136,6 +136,7 @@ config YOGABOOK config YT2_1380 tristate "Lenovo Yoga Tablet 2 1380 fast charge driver" depends on SERIAL_DEV_BUS + depends on EXTCON depends on ACPI help Say Y here to enable support for the custom fast charging protocol diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c index d84ea66eecc6..8fcf38eed7f0 100644 --- a/drivers/platform/x86/amd/hsmp.c +++ b/drivers/platform/x86/amd/hsmp.c @@ -907,16 +907,44 @@ static int hsmp_plat_dev_register(void) return ret; } +/* + * This check is only needed for backward compatibility of previous platforms. + * All new platforms are expected to support ACPI based probing. + */ +static bool legacy_hsmp_support(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return false; + + switch (boot_cpu_data.x86) { + case 0x19: + switch (boot_cpu_data.x86_model) { + case 0x00 ... 0x1F: + case 0x30 ... 0x3F: + case 0x90 ... 0x9F: + case 0xA0 ... 0xAF: + return true; + default: + return false; + } + case 0x1A: + switch (boot_cpu_data.x86_model) { + case 0x00 ... 
0x1F: + return true; + default: + return false; + } + default: + return false; + } + + return false; +} + static int __init hsmp_plt_init(void) { int ret = -ENODEV; - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) { - pr_err("HSMP is not supported on Family:%x model:%x\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return ret; - } - /* * amd_nb_num() returns number of SMN/DF interfaces present in the system * if we have N SMN/DF interfaces that ideally means N sockets @@ -930,7 +958,15 @@ static int __init hsmp_plt_init(void) return ret; if (!plat_dev.is_acpi_device) { - ret = hsmp_plat_dev_register(); + if (legacy_hsmp_support()) { + /* Not ACPI device, but supports HSMP, register a plat_dev */ + ret = hsmp_plat_dev_register(); + } else { + /* Not ACPI, Does not support HSMP */ + pr_info("HSMP is not supported on Family:%x model:%x\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + ret = -ENODEV; + } if (ret) platform_driver_unregister(&amd_hsmp_driver); } diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index e61bfaf8b5c4..b562ed99ec4e 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -11,6 +11,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/container_of.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/capability.h> @@ -25,11 +26,16 @@ static u32 da_supported_commands; static int da_num_tokens; static struct platform_device *platform_device; static struct calling_interface_token *da_tokens; -static struct device_attribute *token_location_attrs; -static struct device_attribute *token_value_attrs; +static struct token_sysfs_data *token_entries; static struct attribute **token_attrs; static DEFINE_MUTEX(smbios_mutex); +struct token_sysfs_data { + struct device_attribute location_attr; + struct device_attribute value_attr; + struct calling_interface_token *token; +}; + struct smbios_device { struct list_head list; struct device *device; @@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy) } } -static int match_attribute(struct device *dev, - struct device_attribute *attr) -{ - int i; - - for (i = 0; i < da_num_tokens * 2; i++) { - if (!token_attrs[i]) - continue; - if (strcmp(token_attrs[i]->name, attr->attr.name) == 0) - return i/2; - } - dev_dbg(dev, "couldn't match: %s\n", attr->attr.name); - return -EINVAL; -} - static ssize_t location_show(struct device *dev, struct device_attribute *attr, char *buf) { - int i; + struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr); if (!capable(CAP_SYS_ADMIN)) return -EPERM; - i = match_attribute(dev, attr); - if (i > 0) - return sysfs_emit(buf, "%08x", da_tokens[i].location); - return 0; + return sysfs_emit(buf, "%08x", data->token->location); } static ssize_t value_show(struct device *dev, struct device_attribute *attr, char *buf) { - int i; + struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr); if (!capable(CAP_SYS_ADMIN)) return -EPERM; - i = match_attribute(dev, attr); - if (i > 0) - return sysfs_emit(buf, "%08x", da_tokens[i].value); - return 0; + return sysfs_emit(buf, "%08x", data->token->value); } static struct attribute_group smbios_attribute_group = { @@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev) { char *location_name; char *value_name; - size_t size; int ret; int i, j; - /* (number of tokens + 1 
for null terminated */ - size = sizeof(struct device_attribute) * (da_num_tokens + 1); - token_location_attrs = kzalloc(size, GFP_KERNEL); - if (!token_location_attrs) + token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL); + if (!token_entries) return -ENOMEM; - token_value_attrs = kzalloc(size, GFP_KERNEL); - if (!token_value_attrs) - goto out_allocate_value; /* need to store both location and value + terminator*/ - size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1); - token_attrs = kzalloc(size, GFP_KERNEL); + token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL); if (!token_attrs) goto out_allocate_attrs; @@ -496,32 +474,34 @@ static int build_tokens_sysfs(struct platform_device *dev) /* skip empty */ if (da_tokens[i].tokenID == 0) continue; + + token_entries[i].token = &da_tokens[i]; + /* add location */ location_name = kasprintf(GFP_KERNEL, "%04x_location", da_tokens[i].tokenID); if (location_name == NULL) goto out_unwind_strings; - sysfs_attr_init(&token_location_attrs[i].attr); - token_location_attrs[i].attr.name = location_name; - token_location_attrs[i].attr.mode = 0444; - token_location_attrs[i].show = location_show; - token_attrs[j++] = &token_location_attrs[i].attr; + + sysfs_attr_init(&token_entries[i].location_attr.attr); + token_entries[i].location_attr.attr.name = location_name; + token_entries[i].location_attr.attr.mode = 0444; + token_entries[i].location_attr.show = location_show; + token_attrs[j++] = &token_entries[i].location_attr.attr; /* add value */ value_name = kasprintf(GFP_KERNEL, "%04x_value", da_tokens[i].tokenID); - if (value_name == NULL) - goto loop_fail_create_value; - sysfs_attr_init(&token_value_attrs[i].attr); - token_value_attrs[i].attr.name = value_name; - token_value_attrs[i].attr.mode = 0444; - token_value_attrs[i].show = value_show; - token_attrs[j++] = &token_value_attrs[i].attr; - continue; - -loop_fail_create_value: - kfree(location_name); - goto out_unwind_strings; + if (!value_name) { + kfree(location_name); + goto out_unwind_strings; + } + + sysfs_attr_init(&token_entries[i].value_attr.attr); + token_entries[i].value_attr.attr.name = value_name; + token_entries[i].value_attr.attr.mode = 0444; + token_entries[i].value_attr.show = value_show; + token_attrs[j++] = &token_entries[i].value_attr.attr; } smbios_attribute_group.attrs = token_attrs; @@ -532,14 +512,12 @@ loop_fail_create_value: out_unwind_strings: while (i--) { - kfree(token_location_attrs[i].attr.name); - kfree(token_value_attrs[i].attr.name); + kfree(token_entries[i].location_attr.attr.name); + kfree(token_entries[i].value_attr.attr.name); } kfree(token_attrs); out_allocate_attrs: - kfree(token_value_attrs); -out_allocate_value: - kfree(token_location_attrs); + kfree(token_entries); return -ENOMEM; } @@ -551,12 +529,11 @@ static void free_group(struct platform_device *pdev) sysfs_remove_group(&pdev->dev.kobj, &smbios_attribute_group); for (i = 0; i < da_num_tokens; i++) { - kfree(token_location_attrs[i].attr.name); - kfree(token_value_attrs[i].attr.name); + kfree(token_entries[i].location_attr.attr.name); + kfree(token_entries[i].value_attr.attr.name); } kfree(token_attrs); - kfree(token_value_attrs); - kfree(token_location_attrs); + kfree(token_entries); } static int __init dell_smbios_init(void) diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 2d9ca2292ea1..f74af0a689f2 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -34,7 
+34,6 @@ static const struct property_entry archos_101_cesium_educ_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"), { } @@ -49,7 +48,6 @@ static const struct property_entry bush_bush_windows_tablet_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1850), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"), { } @@ -79,7 +77,6 @@ static const struct property_entry chuwi_hi8_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -95,7 +92,6 @@ static const struct property_entry chuwi_hi8_pro_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -123,7 +119,6 @@ static const struct property_entry chuwi_hi10_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5), PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -139,7 +134,6 @@ static const struct property_entry chuwi_hi10_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1908), PROPERTY_ENTRY_U32("touchscreen-size-y", 1270), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("silead,pen-supported"), PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8), @@ -171,7 +165,6 @@ static const struct property_entry chuwi_hi10_pro_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"), PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("silead,pen-supported"), PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8), @@ -201,7 +194,6 @@ static const struct property_entry chuwi_hibook_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -227,7 +219,6 @@ static const struct property_entry chuwi_vi8_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -255,7 +246,6 @@ static const struct property_entry chuwi_vi10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1858), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), 
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -271,7 +261,6 @@ static const struct property_entry chuwi_surbook_mini_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2040), PROPERTY_ENTRY_U32("touchscreen-size-y", 1524), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), { } }; @@ -289,7 +278,6 @@ static const struct property_entry connect_tablet9_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -306,7 +294,6 @@ static const struct property_entry csl_panther_tab_hd_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -322,7 +309,6 @@ static const struct property_entry cube_iwork8_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 896), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -346,7 +332,6 @@ static const struct property_entry cube_knote_i1101_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1961), PROPERTY_ENTRY_U32("touchscreen-size-y", 1513), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -360,7 +345,6 @@ static const struct property_entry dexp_ursus_7w_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 890), PROPERTY_ENTRY_U32("touchscreen-size-y", 630), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -376,7 +360,6 @@ static const struct property_entry dexp_ursus_kx210i_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1720), PROPERTY_ENTRY_U32("touchscreen-size-y", 1137), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -391,7 +374,6 @@ static const struct property_entry digma_citi_e200_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -450,7 +432,6 @@ static const struct property_entry irbis_tw90_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -466,7 +447,6 @@ static const struct property_entry irbis_tw118_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1960), PROPERTY_ENTRY_U32("touchscreen-size-y", 1510), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -483,7 +463,6 @@ static const struct property_entry itworks_tw891_props[] = { 
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -496,7 +475,6 @@ static const struct property_entry jumper_ezpad_6_pro_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -511,7 +489,6 @@ static const struct property_entry jumper_ezpad_6_pro_b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -527,7 +504,6 @@ static const struct property_entry jumper_ezpad_6_m4_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1950), PROPERTY_ENTRY_U32("touchscreen-size-y", 1525), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -544,7 +520,6 @@ static const struct property_entry jumper_ezpad_7_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1526), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"), { } }; @@ -561,7 +536,6 @@ static const struct property_entry jumper_ezpad_mini3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1138), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -578,7 +552,6 @@ static const struct property_entry mpman_converter9_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-mpman-converter9.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -594,7 +567,6 @@ static const struct property_entry mpman_mpwin895cl_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1150), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -611,7 +583,6 @@ static const struct property_entry myria_my8307_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -628,7 +599,6 @@ static const struct property_entry onda_obook_20_plus_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -645,7 +615,6 @@ static const struct property_entry onda_v80_plus_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"), - 
PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -669,7 +638,6 @@ static const struct property_entry onda_v820w_32g_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -687,7 +655,6 @@ static const struct property_entry onda_v891_v5_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891-v5.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -703,7 +670,6 @@ static const struct property_entry onda_v891w_v1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1676), PROPERTY_ENTRY_U32("touchscreen-size-y", 1130), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -720,7 +686,6 @@ static const struct property_entry onda_v891w_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1135), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -759,7 +724,6 @@ static const struct property_entry pipo_w11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), PROPERTY_ENTRY_U32("touchscreen-size-y", 1532), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -775,7 +739,6 @@ static const struct property_entry positivo_c4128b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1915), PROPERTY_ENTRY_U32("touchscreen-size-y", 1269), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -791,7 +754,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1146), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -808,7 +770,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -825,7 +786,6 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1520), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -842,7 +802,6 @@ static const struct property_entry predia_basic_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1144), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -859,7 +818,6 @@ static const struct 
property_entry rca_cambio_w101_v2_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 874), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -874,7 +832,6 @@ static const struct property_entry rwc_nanote_p8_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -890,7 +847,6 @@ static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -906,7 +862,6 @@ static const struct property_entry globalspace_solt_ivw116_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1723), PROPERTY_ENTRY_U32("touchscreen-size-y", 1077), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -923,7 +878,6 @@ static const struct property_entry techbite_arc_11_6_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1270), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -939,7 +893,6 @@ static const struct property_entry teclast_tbook11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1264), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -965,7 +918,6 @@ static const struct property_entry teclast_x16_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1264), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -988,7 +940,6 @@ static const struct property_entry teclast_x3_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1004,7 +955,6 @@ static const struct property_entry teclast_x98plus2_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -1018,7 +968,6 @@ static const struct property_entry trekstor_primebook_c11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1530), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1032,7 +981,6 @@ static const struct property_entry trekstor_primebook_c13_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2624), PROPERTY_ENTRY_U32("touchscreen-size-y", 1920), PROPERTY_ENTRY_STRING("firmware-name", 
"gsl1680-trekstor-primebook-c13.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1046,7 +994,6 @@ static const struct property_entry trekstor_primetab_t13b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2500), PROPERTY_ENTRY_U32("touchscreen-size-y", 1900), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), { } @@ -1074,7 +1021,6 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1), PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1090,7 +1036,6 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 884), PROPERTY_ENTRY_U32("touchscreen-size-y", 632), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1107,7 +1052,6 @@ static const struct property_entry viglen_connect_10_props[] = { PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1121,7 +1065,6 @@ static const struct property_entry vinga_twizzle_j116_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1920), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1907,7 +1850,7 @@ static int __init ts_parse_props(char *str) u32 u32val; int i, ret; - strscpy(orig_str, str, sizeof(orig_str)); + strscpy(orig_str, str); /* * str is part of the static_command_line from init/main.c and poking diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index e74a0f6a3157..4e80273dfb1e 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h @@ -6,6 +6,7 @@ extern struct mutex pnp_lock; extern const struct attribute_group *pnp_dev_groups[]; +extern const struct bus_type pnp_bus_type; int pnp_register_protocol(struct pnp_protocol *protocol); void pnp_unregister_protocol(struct pnp_protocol *protocol); diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index 0a5d0d8befa8..3483e52e3a81 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -266,6 +266,12 @@ const struct bus_type pnp_bus_type = { .dev_groups = pnp_dev_groups, }; +bool dev_is_pnp(const struct device *dev) +{ + return dev->bus == &pnp_bus_type; +} +EXPORT_SYMBOL_GPL(dev_is_pnp); + int pnp_register_driver(struct pnp_driver *drv) { drv->driver.name = drv->name; diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 7513018c9f9a..2067b0120d08 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, } if (info->verify(info, pin, func, chan)) { - pr_err("driver cannot use function %u on pin %u\n", func, chan); + pr_err("driver cannot use function %u and channel %u on pin %u\n", + func, chan, pin); return -EOPNOTSUPP; } diff 
--git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index 5de69e0bb0f9..196c1c8b578c 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -224,7 +224,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo); int get_df_system_info(void); int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num); -int get_addr_hash_mi300(void); +int get_umc_info_mi300(void); int get_address_map(struct addr_ctx *ctx); diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c index 701349e84942..6979fa3d4fe2 100644 --- a/drivers/ras/amd/atl/system.c +++ b/drivers/ras/amd/atl/system.c @@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg) if (reg == DF_FUNC0_ID_MI300) { df_cfg.flags.heterogeneous = 1; - if (get_addr_hash_mi300()) + if (get_umc_info_mi300()) return -EINVAL; } diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c index 59b6169093f7..a1b4accf7b96 100644 --- a/drivers/ras/amd/atl/umc.c +++ b/drivers/ras/amd/atl/umc.c @@ -68,6 +68,8 @@ struct xor_bits { }; #define NUM_BANK_BITS 4 +#define NUM_COL_BITS 5 +#define NUM_SID_BITS 2 static struct { /* UMC::CH::AddrHashBank */ @@ -80,7 +82,22 @@ static struct { u8 bank_xor; } addr_hash; +static struct { + u8 bank[NUM_BANK_BITS]; + u8 col[NUM_COL_BITS]; + u8 sid[NUM_SID_BITS]; + u8 num_row_lo; + u8 num_row_hi; + u8 row_lo; + u8 row_hi; + u8 pc; +} bit_shifts; + #define MI300_UMC_CH_BASE 0x90000 +#define MI300_ADDR_CFG (MI300_UMC_CH_BASE + 0x30) +#define MI300_ADDR_SEL (MI300_UMC_CH_BASE + 0x40) +#define MI300_COL_SEL_LO (MI300_UMC_CH_BASE + 0x50) +#define MI300_ADDR_SEL_2 (MI300_UMC_CH_BASE + 0xA4) #define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8) #define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0) #define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4) @@ -90,17 +107,42 @@ static struct { #define ADDR_HASH_ROW_XOR GENMASK(31, 14) #define ADDR_HASH_BANK_XOR GENMASK(5, 0) +#define ADDR_CFG_NUM_ROW_LO GENMASK(11, 8) +#define ADDR_CFG_NUM_ROW_HI GENMASK(15, 12) + +#define ADDR_SEL_BANK0 GENMASK(3, 0) +#define ADDR_SEL_BANK1 GENMASK(7, 4) +#define ADDR_SEL_BANK2 GENMASK(11, 8) +#define ADDR_SEL_BANK3 GENMASK(15, 12) +#define ADDR_SEL_BANK4 GENMASK(20, 16) +#define ADDR_SEL_ROW_LO GENMASK(27, 24) +#define ADDR_SEL_ROW_HI GENMASK(31, 28) + +#define COL_SEL_LO_COL0 GENMASK(3, 0) +#define COL_SEL_LO_COL1 GENMASK(7, 4) +#define COL_SEL_LO_COL2 GENMASK(11, 8) +#define COL_SEL_LO_COL3 GENMASK(15, 12) +#define COL_SEL_LO_COL4 GENMASK(19, 16) + +#define ADDR_SEL_2_BANK5 GENMASK(4, 0) +#define ADDR_SEL_2_CHAN GENMASK(15, 12) + /* * Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used - * for hashing. Do this during module init, since the values will not - * change during run time. + * for hashing. + * + * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH:ColSelLo registers to + * get the values needed to reconstruct the normalized address. Apply additional + * offsets to the raw register values, as needed. + * + * Do this during module init, since the values will not change during run time. * * These registers are instantiated for each UMC across each AMD Node. * However, they should be identically programmed due to the fixed hardware * design of MI300 systems. So read the values from Node 0 UMC 0 and keep a * single global structure for simplicity. 
*/ -int get_addr_hash_mi300(void) +int get_umc_info_mi300(void) { u32 temp; int ret; @@ -130,6 +172,44 @@ int get_addr_hash_mi300(void) addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp); + ret = amd_smn_read(0, MI300_ADDR_CFG, &temp); + if (ret) + return ret; + + bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp); + bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp); + + ret = amd_smn_read(0, MI300_ADDR_SEL, &temp); + if (ret) + return ret; + + bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp); + bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp); + bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp); + bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp); + /* Use BankBit4 for the SID0 position. */ + bit_shifts.sid[0] = 5 + FIELD_GET(ADDR_SEL_BANK4, temp); + bit_shifts.row_lo = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp); + bit_shifts.row_hi = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp); + + ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp); + if (ret) + return ret; + + bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp); + bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp); + bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp); + bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp); + bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp); + + ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp); + if (ret) + return ret; + + /* Use BankBit5 for the SID1 position. */ + bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp); + bit_shifts.pc = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp); + return 0; } @@ -146,9 +226,6 @@ int get_addr_hash_mi300(void) * The MCA address format is as follows: * MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]} * - * The normalized address format is fixed in hardware and is as follows: - * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]} - * * Additionally, the PC and Bank bits may be hashed. This must be accounted for before * reconstructing the normalized address. */ @@ -158,18 +235,10 @@ int get_addr_hash_mi300(void) #define MI300_UMC_MCA_PC BIT(25) #define MI300_UMC_MCA_SID GENMASK(27, 26) -#define MI300_NA_COL_1_0 GENMASK(6, 5) -#define MI300_NA_PC BIT(7) -#define MI300_NA_COL_3_2 GENMASK(9, 8) -#define MI300_NA_BANK_3_2 GENMASK(11, 10) -#define MI300_NA_BANK_1_0 GENMASK(13, 12) -#define MI300_NA_COL_4 BIT(14) -#define MI300_NA_ROW GENMASK(28, 15) -#define MI300_NA_SID GENMASK(30, 29) - static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) { - u16 i, col, row, bank, pc, sid, temp; + u16 i, col, row, bank, pc, sid; + u32 temp; col = FIELD_GET(MI300_UMC_MCA_COL, addr); bank = FIELD_GET(MI300_UMC_MCA_BANK, addr); @@ -189,49 +258,48 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) /* Calculate hash for PC bit. */ if (addr_hash.pc.xor_enable) { - /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */ - bank |= sid << 5; - temp = bitwise_xor_bits(col & addr_hash.pc.col_xor); temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor); - temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor); + /* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */ + temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor); pc ^= temp; - - /* Drop SID bits for the sake of debug printing later. 
*/ - bank &= 0x1F; } /* Reconstruct the normalized address starting with NA[4:0] = 0 */ addr = 0; - /* NA[6:5] = Column[1:0] */ - temp = col & 0x3; - addr |= FIELD_PREP(MI300_NA_COL_1_0, temp); - - /* NA[7] = PC */ - addr |= FIELD_PREP(MI300_NA_PC, pc); - - /* NA[9:8] = Column[3:2] */ - temp = (col >> 2) & 0x3; - addr |= FIELD_PREP(MI300_NA_COL_3_2, temp); + /* Column bits */ + for (i = 0; i < NUM_COL_BITS; i++) { + temp = (col >> i) & 0x1; + addr |= temp << bit_shifts.col[i]; + } - /* NA[11:10] = Bank[3:2] */ - temp = (bank >> 2) & 0x3; - addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp); + /* Bank bits */ + for (i = 0; i < NUM_BANK_BITS; i++) { + temp = (bank >> i) & 0x1; + addr |= temp << bit_shifts.bank[i]; + } - /* NA[13:12] = Bank[1:0] */ - temp = bank & 0x3; - addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp); + /* Row lo bits */ + for (i = 0; i < bit_shifts.num_row_lo; i++) { + temp = (row >> i) & 0x1; + addr |= temp << (i + bit_shifts.row_lo); + } - /* NA[14] = Column[4] */ - temp = (col >> 4) & 0x1; - addr |= FIELD_PREP(MI300_NA_COL_4, temp); + /* Row hi bits */ + for (i = 0; i < bit_shifts.num_row_hi; i++) { + temp = (row >> (i + bit_shifts.num_row_lo)) & 0x1; + addr |= temp << (i + bit_shifts.row_hi); + } - /* NA[28:15] = Row[13:0] */ - addr |= FIELD_PREP(MI300_NA_ROW, row); + /* PC bit */ + addr |= pc << bit_shifts.pc; - /* NA[30:29] = SID[1:0] */ - addr |= FIELD_PREP(MI300_NA_SID, sid); + /* SID bits */ + for (i = 0; i < NUM_SID_BITS; i++) { + temp = (sid >> i) & 0x1; + addr |= temp << bit_shifts.sid[i]; + } pr_debug("Addr=0x%016lx", addr); pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index a226dc1b65d7..4eb0837298d4 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state) } } -static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, - struct scsi_sense_hdr *sense_hdr) +static void alua_handle_state_transition(struct scsi_device *sdev) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg; + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + rcu_read_unlock(); + alua_check(sdev, false); +} + +static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition */ - rcu_read_lock(); - pg = rcu_dereference(h->pg); - if (pg) - pg->state = SCSI_ACCESS_STATE_TRANSITIONING; - rcu_read_unlock(); - alua_check(sdev, false); + alua_handle_state_transition(sdev); return NEEDS_RETRY; } break; case UNIT_ATTENTION: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { + /* + * LUN Not Accessible - ALUA state transition + */ + alua_handle_state_transition(sdev); + return NEEDS_RETRY; + } if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { /* * Power On, Reset, or Bus Device Reset. 
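The MI300 address-translation change a few hunks above stops hard-coding the normalized-address layout in fixed GENMASK positions and instead scatters each column, bank and SID bit to a destination bit read from the UMC Addr{Cfg,Sel,Sel2}/ColSelLo registers at init time. The following is an illustrative, self-contained sketch of that bit-scatter idea only; the toy_* names and the struct layout are assumptions for the example, not code from this commit.

#include <stdint.h>

/*
 * Illustrative only. Each input bit i of a decoded field is placed at the
 * destination position pos[i], mirroring the per-bit loops added in
 * convert_dram_to_norm_addr_mi300().
 */
struct toy_bit_shifts {
	uint8_t col[5];		/* destination bit for each column bit */
	uint8_t bank[4];	/* destination bit for each bank bit   */
	uint8_t sid[2];		/* destination bit for each SID bit    */
};

static uint64_t toy_scatter_bits(uint64_t value, const uint8_t *pos,
				 unsigned int nbits)
{
	uint64_t out = 0;
	unsigned int i;

	for (i = 0; i < nbits; i++)
		out |= ((value >> i) & 0x1ULL) << pos[i];

	return out;
}

static uint64_t toy_rebuild_addr(uint16_t col, uint16_t bank, uint16_t sid,
				 const struct toy_bit_shifts *s)
{
	uint64_t addr = 0;

	addr |= toy_scatter_bits(col, s->col, 5);
	addr |= toy_scatter_bits(bank, s->bank, 4);
	addr |= toy_scatter_bits(sid, s->sid, 2);
	return addr;	/* the real code places row_lo/row_hi and PC the same way */
}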
@@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev) retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, &sense_hdr); - if (sense_hdr.sense_key == NOT_READY && + if ((sense_hdr.sense_key == NOT_READY || + sense_hdr.sense_key == UNIT_ATTENTION) && sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) return SCSI_DH_RETRY; else if (retval) diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 1638109a68a0..cd261b48eb46 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -2163,10 +2163,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(persistent_id); +/** + * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_supported attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read-only' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_supported_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); +} +static DEVICE_ATTR_RO(sas_ncq_prio_supported); + +/** + * sas_ncq_prio_enable_show - send prioritized io commands to device + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_enable attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read/write' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + + if (!sdev_priv_data) + return 0; + + return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable); +} + +static ssize_t +sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + bool ncq_prio_enable = 0; + + if (kstrtobool(buf, &ncq_prio_enable)) + return -EINVAL; + + if (!sas_ata_ncq_prio_supported(sdev)) + return -EINVAL; + + sdev_priv_data->ncq_prio_enable = ncq_prio_enable; + + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + static struct attribute *mpi3mr_dev_attrs[] = { &dev_attr_sas_address.attr, &dev_attr_device_handle.attr, &dev_attr_persistent_id.attr, + &dev_attr_sas_ncq_prio_supported.attr, + &dev_attr_sas_ncq_prio_enable.attr, NULL, }; diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index 329cc6ec3b58..82aa4e418c5a 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -1364,7 +1364,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, continue; if (i > sizeof(mr_sas_port->phy_mask) * 8) { - ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n", + ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n", i, sizeof(mr_sas_port->phy_mask) * 8); goto out_fail; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1320e06727df..b2bcf4a27ddc 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); if 
(ioc->facts.MaxDevHandle % 8) ioc->pd_handles_sz++; + /* + * pd_handles_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may occur. + */ + ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long)); + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); if (!ioc->pd_handles) { @@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pend_os_device_add_sz++; + + /* + * pend_os_device_add_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory may occur. + */ + ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz, + sizeof(unsigned long)); ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, GFP_KERNEL); if (!ioc->pend_os_device_add) { @@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) if (ioc->facts.MaxDevHandle % 8) pd_handles_sz++; + /* + * pd_handles should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may + * occur. + */ + pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long)); pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, GFP_KERNEL); if (!pd_handles) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index bf100a4ebfc3..fe1e96fda284 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -2048,9 +2048,6 @@ void mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request); -/* NCQ Prio Handling Check */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev); - void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_init_debugfs(void); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 1c9fd26195b8..87784c96249a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev, { struct scsi_device *sdev = to_scsi_device(dev); - return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev)); + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); } static DEVICE_ATTR_RO(sas_ncq_prio_supported); @@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev, if (kstrtobool(buf, &ncq_prio_enable)) return -EINVAL; - if (!scsih_ncq_prio_supp(sdev)) + if (!sas_ata_ncq_prio_supported(sdev)) return -EINVAL; sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 89ef43a5ef86..870ec2cb4af4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -302,8 +302,8 @@ struct _scsi_io_transfer { /** * _scsih_set_debug_level - global setting of ioc->logging_level. - * @val: ? - * @kp: ? + * @val: value of the parameter to be set + * @kp: pointer to kernel_param structure * * Note: The logging levels are defined in mpt3sas_debug.h. */ @@ -12571,29 +12571,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -/** - * scsih_ncq_prio_supp - Check for NCQ command priority support - * @sdev: scsi device struct - * - * This is called when a user indicates they would like to enable - * ncq command priorities. This works only on SATA devices. 
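The mpt3sas hunks above round pd_handles_sz and pend_os_device_add_sz up with ALIGN() because set_bit()/test_bit() access the bitmap one unsigned long at a time. A minimal sketch of that sizing rule follows; the helper name and parameters are hypothetical, not taken from this commit.

#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Illustrative only: one bit per device handle needs max_handles/8 bytes,
 * rounded up, and then a whole number of unsigned longs so that the
 * word-sized accesses done by set_bit()/test_bit() never touch memory
 * past the end of the allocation.
 */
static void *alloc_handle_bitmap(unsigned int max_handles, size_t *nbytes)
{
	size_t sz = DIV_ROUND_UP(max_handles, 8);

	sz = ALIGN(sz, sizeof(unsigned long));
	*nbytes = sz;
	return kzalloc(sz, GFP_KERNEL);
}

kzalloc() already returns suitably aligned memory; the ALIGN() here is about the size, so the last partial word belongs entirely to the bitmap.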
- */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev) -{ - struct scsi_vpd *vpd; - bool ncq_prio_supp = false; - - rcu_read_lock(); - vpd = rcu_dereference(sdev->vpd_pg89); - if (!vpd || vpd->len < 214) - goto out; - - ncq_prio_supp = (vpd->data[213] >> 4) & 1; -out: - rcu_read_unlock(); - - return ncq_prio_supp; -} /* * The pci device ids are defined in mpi/mpi2_cnfg.h. */ diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 5058e01b65a2..98afdfe63600 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -363,6 +363,7 @@ struct qedf_ctx { #define QEDF_IN_RECOVERY 5 #define QEDF_DBG_STOP_IO 6 #define QEDF_PROBING 8 +#define QEDF_STAG_IN_PROGRESS 9 unsigned long flags; /* Miscellaneous state flags */ int fipvlan_retries; u8 num_queues; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index fd12439cbaab..49adddf978cc 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, */ if (resp == fc_lport_flogi_resp) { qedf->flogi_cnt++; + qedf->flogi_pending++; + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); + qedf->flogi_pending = 0; + } + if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { schedule_delayed_work(&qedf->stag_work, 2); return NULL; } - qedf->flogi_pending++; + return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, arg, timeout); } @@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) struct qedf_ctx *qedf; struct qed_link_output if_link; + qedf = lport_priv(lport); + if (lport->vport) { + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); printk_ratelimited("Cannot issue host reset on NPIV port.\n"); return; } - qedf = lport_priv(lport); - qedf->flogi_pending = 0; /* For host reset, essentially do a soft link up/down */ atomic_set(&qedf->link_state, QEDF_LINK_DOWN); @@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) if (!if_link.link_up) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Physical link is not up.\n"); + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); return; } /* Flush and wait to make sure link down is processed */ @@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) "Queue link up work.\n"); queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); } /* Reset the host by gracefully logging out and then logging back in */ @@ -3463,6 +3473,7 @@ retry_probe: } /* Start the Slowpath-process */ + memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params)); slowpath_params.int_mode = QED_INT_MODE_MSIX; slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; @@ -3721,6 +3732,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) { struct qedf_ctx *qedf; int rc; + int cnt = 0; if (!pdev) { QEDF_ERR(NULL, "pdev is NULL.\n"); @@ -3738,6 +3750,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) return; } +stag_in_prog: + if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt); + cnt++; + + if (cnt < 5) { + msleep(500); + goto stag_in_prog; + } + } + if (mode != QEDF_MODE_RECOVERY) set_bit(QEDF_UNLOADING, &qedf->flags); @@ -3997,6 +4020,24 @@ void qedf_stag_change_work(struct work_struct *work) struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, stag_work.work); + if (!qedf) { + QEDF_ERR(&qedf->dbg_ctx, "qedf is 
NULL"); + return; + } + + if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Already is in recovery, hence not calling software context reset.\n"); + return; + } + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); + return; + } + + set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); + printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.", dev_name(&qedf->pdev->dev), __func__, __LINE__, qedf->dbg_ctx.host_no); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3e0c0381277a..ee69bd35889d 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) if (result < SCSI_VPD_HEADER_SIZE) return 0; + if (result > sizeof(vpd)) { + dev_warn_once(&sdev->sdev_gendev, + "%s: long VPD page 0 length: %d bytes\n", + __func__, result); + result = sizeof(vpd); + } + result -= SCSI_VPD_HEADER_SIZE; if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result)) return 0; @@ -666,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev) sdev->use_10_for_rw = 0; sdev->cdl_supported = 1; + + /* + * If the device supports CDL, make sure that the current drive + * feature status is consistent with the user controlled + * cdl_enable state. + */ + scsi_cdl_enable(sdev, sdev->cdl_enable); } else { sdev->cdl_supported = 0; } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 424a89513814..4e33f1661e4c 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) } EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); +/** + * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support + * @sdev: SCSI device + * + * Check if an ATA device supports NCQ priority using VPD page 89h (ATA + * Information). Since this VPD page is implemented only for ATA devices, + * this function always returns false for SCSI devices. 
+ */ +bool sas_ata_ncq_prio_supported(struct scsi_device *sdev) +{ + struct scsi_vpd *vpd; + bool ncq_prio_supported = false; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (vpd && vpd->len >= 214) + ncq_prio_supported = (vpd->data[213] >> 4) & 1; + rcu_read_unlock(); + + return ncq_prio_supported; +} +EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported); + /* * SAS Phy attributes */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index f6c822c9cbd2..37dd6ead72a4 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3565,16 +3565,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, static void sd_read_block_zero(struct scsi_disk *sdkp) { - unsigned int buf_len = sdkp->device->sector_size; - char *buffer, cmd[10] = { }; + struct scsi_device *sdev = sdkp->device; + unsigned int buf_len = sdev->sector_size; + u8 *buffer, cmd[16] = { }; buffer = kmalloc(buf_len, GFP_KERNEL); if (!buffer) return; - cmd[0] = READ_10; - put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ - put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + if (sdev->use_16_for_rw) { + cmd[0] = READ_16; + put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */ + } else { + cmd[0] = READ_10; + put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + } scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len, SD_TIMEOUT, sdkp->max_retries, NULL); diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 1175f2e213b5..dc899277b3a4 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *); int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *); int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *); int sr_reset(struct cdrom_device_info *); -int sr_select_speed(struct cdrom_device_info *cdi, int speed); +int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed); int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); int sr_is_xa(Scsi_CD *); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 5b0b35e60e61..a0d2556a27bb 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi) return 0; } -int sr_select_speed(struct cdrom_device_info *cdi, int speed) +int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; + /* avoid exceeding the max speed or overflowing integer bounds */ + speed = clamp(0, speed, 0xffff / 177); + if (speed == 0) speed = 0xffff; /* set to max */ else diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 54cbe652a4df..3f953504244b 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -4358,6 +4358,33 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) return ctlr->transfer(spi, message); } +static void devm_spi_unoptimize_message(void *msg) +{ + spi_unoptimize_message(msg); +} + +/** + * devm_spi_optimize_message - managed version of spi_optimize_message() + * @dev: the device that manages @msg (usually @spi->dev) + * @spi: the device that will be used for the message + * @msg: the message to optimize + * Return: zero on success, else a negative error code + * + * spi_unoptimize_message() will automatically be called when the device is + * removed. 
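For context, a minimal probe() sketch of how a driver with a message layout that is fixed for its whole lifetime might use the devm_spi_optimize_message() helper documented above; the foo_* names, the single two-byte read transfer and the surrounding driver structure are illustrative assumptions, not taken from this series.

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct foo_state {
	struct spi_message msg;
	struct spi_transfer xfer;
	u8 rx_buf[2];
};

static int foo_probe(struct spi_device *spi)
{
	struct foo_state *st;

	st = devm_kzalloc(&spi->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	st->xfer.rx_buf = st->rx_buf;
	st->xfer.len = sizeof(st->rx_buf);
	spi_message_init_with_transfers(&st->msg, &st->xfer, 1);

	/* Optimize once at probe; undone automatically on device removal. */
	return devm_spi_optimize_message(&spi->dev, spi, &st->msg);
}

Because the devres action calls spi_unoptimize_message() when the device goes away, no explicit cleanup is needed in remove().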
+ */ +int devm_spi_optimize_message(struct device *dev, struct spi_device *spi, + struct spi_message *msg) +{ + int ret; + + ret = spi_optimize_message(spi, msg); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg); +} + /** * spi_async - asynchronous SPI transfer * @spi: device with which data will be exchanged diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 297af1d80b12..69daeba974f2 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1759,7 +1759,7 @@ static int vchiq_probe(struct platform_device *pdev) if (err) goto failed_platform_init; - vchiq_debugfs_init(); + vchiq_debugfs_init(&mgmt->state); dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n", VCHIQ_VERSION, VCHIQ_VERSION_MIN); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c index 54e7bf029d9a..1f74d0bb33ba 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c @@ -42,9 +42,9 @@ static int debugfs_trace_show(struct seq_file *f, void *offset) static int vchiq_dump_show(struct seq_file *f, void *offset) { - struct vchiq_instance *instance = f->private; + struct vchiq_state *state = f->private; - vchiq_dump_state(f, instance->state); + vchiq_dump_state(f, state); return 0; } @@ -121,12 +121,12 @@ void vchiq_debugfs_remove_instance(struct vchiq_instance *instance) debugfs_remove_recursive(node->dentry); } -void vchiq_debugfs_init(void) +void vchiq_debugfs_init(struct vchiq_state *state) { vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL); vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir); - debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, NULL, + debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state, &vchiq_dump_fops); } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h index e9bf055a4ca9..fabffd81b1ec 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h @@ -10,7 +10,7 @@ struct vchiq_debugfs_node { struct dentry *dentry; }; -void vchiq_debugfs_init(void); +void vchiq_debugfs_init(struct vchiq_state *state); void vchiq_debugfs_deinit(void); diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c index e0fdc497bfcc..65974fe8be0d 100644 --- a/drivers/thermal/gov_step_wise.c +++ b/drivers/thermal/gov_step_wise.c @@ -93,6 +93,23 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, if (instance->initialized && old_target == instance->target) continue; + if (trip->type == THERMAL_TRIP_PASSIVE) { + /* + * If the target state for this thermal instance + * changes from THERMAL_NO_TARGET to something else, + * ensure that the zone temperature will be updated + * (assuming enabled passive cooling) until it becomes + * THERMAL_NO_TARGET again, or the cooling device may + * not be reset to its initial state. 
+ */ + if (old_target == THERMAL_NO_TARGET && + instance->target != THERMAL_NO_TARGET) + tz->passive++; + else if (old_target != THERMAL_NO_TARGET && + instance->target == THERMAL_NO_TARGET) + tz->passive--; + } + instance->initialized = true; mutex_lock(&instance->cdev->lock); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 54cce4e523bc..d70e76dd3c94 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -467,6 +467,21 @@ static void thermal_governor_trip_crossed(struct thermal_governor *governor, governor->trip_crossed(tz, trip, crossed_up); } +static void thermal_trip_crossed(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_governor *governor, + bool crossed_up) +{ + if (crossed_up) { + thermal_notify_tz_trip_up(tz, trip); + thermal_debug_tz_trip_up(tz, trip); + } else { + thermal_notify_tz_trip_down(tz, trip); + thermal_debug_tz_trip_down(tz, trip); + } + thermal_governor_trip_crossed(governor, tz, trip, crossed_up); +} + static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a, const struct list_head *b) { @@ -506,18 +521,12 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, handle_thermal_trip(tz, td, &way_up_list, &way_down_list); list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp); - list_for_each_entry(td, &way_up_list, notify_list_node) { - thermal_notify_tz_trip_up(tz, &td->trip); - thermal_debug_tz_trip_up(tz, &td->trip); - thermal_governor_trip_crossed(governor, tz, &td->trip, true); - } + list_for_each_entry(td, &way_up_list, notify_list_node) + thermal_trip_crossed(tz, &td->trip, governor, true); list_sort(NULL, &way_down_list, thermal_trip_notify_cmp); - list_for_each_entry(td, &way_down_list, notify_list_node) { - thermal_notify_tz_trip_down(tz, &td->trip); - thermal_debug_tz_trip_down(tz, &td->trip); - thermal_governor_trip_crossed(governor, tz, &td->trip, false); - } + list_for_each_entry(td, &way_down_list, notify_list_node) + thermal_trip_crossed(tz, &td->trip, governor, false); if (governor->manage) governor->manage(tz); @@ -593,6 +602,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_zone_device_update); +void thermal_zone_trip_down(struct thermal_zone_device *tz, + const struct thermal_trip *trip) +{ + thermal_trip_crossed(tz, trip, thermal_get_tz_governor(tz), false); +} + int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *), void *data) { @@ -984,9 +999,17 @@ __thermal_cooling_device_register(struct device_node *np, if (ret) goto out_cdev_type; + /* + * The cooling device's current state is only needed for debug + * initialization below, so a failure to get it does not cause + * the entire cooling device initialization to fail. However, + * the debug will not work for the device if its initial state + * cannot be determined and drivers are responsible for ensuring + * that this will not happen. 
+ */ ret = cdev->ops->get_cur_state(cdev, &current_state); if (ret) - goto out_cdev_type; + current_state = ULONG_MAX; thermal_cooling_device_setup_sysfs(cdev); @@ -1001,7 +1024,8 @@ __thermal_cooling_device_register(struct device_node *np, return ERR_PTR(ret); } - thermal_debug_cdev_add(cdev, current_state); + if (current_state <= cdev->max_state) + thermal_debug_cdev_add(cdev, current_state); /* Add 'this' new cdev to the global cdev list */ mutex_lock(&thermal_list_lock); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index d9785e5bbb08..20e7b45673d6 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -246,6 +246,8 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz, void thermal_zone_trip_updated(struct thermal_zone_device *tz, const struct thermal_trip *trip); int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); +void thermal_zone_trip_down(struct thermal_zone_device *tz, + const struct thermal_trip *trip); /* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *tz); diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c index 91f9c21235a8..942447229157 100644 --- a/drivers/thermal/thermal_debugfs.c +++ b/drivers/thermal/thermal_debugfs.c @@ -91,6 +91,8 @@ struct cdev_record { * * @timestamp: the trip crossing timestamp * @duration: total time when the zone temperature was above the trip point + * @trip_temp: trip temperature at mitigation start + * @trip_hyst: trip hysteresis at mitigation start * @count: the number of times the zone temperature was above the trip point * @max: maximum recorded temperature above the trip point * @min: minimum recorded temperature above the trip point @@ -99,6 +101,8 @@ struct cdev_record { struct trip_stats { ktime_t timestamp; ktime_t duration; + int trip_temp; + int trip_hyst; int count; int max; int min; @@ -574,6 +578,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, struct thermal_debugfs *thermal_dbg = tz->debugfs; int trip_id = thermal_zone_trip_id(tz, trip); ktime_t now = ktime_get(); + struct trip_stats *trip_stats; if (!thermal_dbg) return; @@ -639,7 +644,10 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, tz_dbg->trips_crossed[tz_dbg->nr_trips++] = trip_id; tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node); - tze->trip_stats[trip_id].timestamp = now; + trip_stats = &tze->trip_stats[trip_id]; + trip_stats->trip_temp = trip->temperature; + trip_stats->trip_hyst = trip->hysteresis; + trip_stats->timestamp = now; unlock: mutex_unlock(&thermal_dbg->lock); @@ -794,10 +802,6 @@ static int tze_seq_show(struct seq_file *s, void *v) const struct thermal_trip *trip = &td->trip; struct trip_stats *trip_stats; - /* Skip invalid trips. 
*/ - if (trip->temperature == THERMAL_TEMP_INVALID) - continue; - /* * There is no possible mitigation happening at the * critical trip point, so the stats will be always @@ -836,8 +840,8 @@ static int tze_seq_show(struct seq_file *s, void *v) seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d | %*d |\n", 4 , trip_id, 8, type, - 9, trip->temperature, - 9, trip->hysteresis, + 9, trip_stats->trip_temp, + 9, trip_stats->trip_hyst, c, 10, duration_ms, 9, trip_stats->avg, 9, trip_stats->min, diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index d6a6acc78ddb..49e63db68517 100644 --- a/drivers/thermal/thermal_trip.c +++ b/drivers/thermal/thermal_trip.c @@ -152,17 +152,23 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, if (trip->temperature == temp) return; + trip->temperature = temp; + thermal_notify_tz_trip_change(tz, trip); + if (temp == THERMAL_TEMP_INVALID) { struct thermal_trip_desc *td = trip_to_trip_desc(trip); - if (trip->type == THERMAL_TRIP_PASSIVE && - tz->temperature >= td->threshold) { + if (tz->temperature >= td->threshold) { /* - * The trip has been crossed, so the thermal zone's - * passive count needs to be adjusted. + * The trip has been crossed on the way up, so some + * adjustments are needed to compensate for the lack + * of it going forward. */ - tz->passive--; - WARN_ON_ONCE(tz->passive < 0); + if (trip->type == THERMAL_TRIP_PASSIVE) { + tz->passive--; + WARN_ON_ONCE(tz->passive < 0); + } + thermal_zone_trip_down(tz, trip); } /* * Invalidate the threshold to avoid triggering a spurious @@ -170,7 +176,5 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, */ td->threshold = INT_MAX; } - trip->temperature = temp; - thermal_notify_tz_trip_change(tz, trip); } EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp); diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index 193e9dfc983b..70b52aac3d97 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port) debugfs_create_file("run", 0600, dir, port, &margining_run_fops); debugfs_create_file("results", 0600, dir, port, &margining_results_fops); debugfs_create_file("test", 0600, dir, port, &margining_test_fops); - if (independent_voltage_margins(usb4) || - (supports_time(usb4) && independent_time_margins(usb4))) + if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL || + (supports_time(usb4) && + independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR)) debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops); } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index f252d0b5a434..5e9ca4376d68 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, else if (ldata->raw || (L_EXTPROC(tty) && !preops)) n_tty_receive_buf_raw(tty, cp, fp, count); else if (tty->closing && !L_EXTPROC(tty)) { - if (la_count > 0) + if (la_count > 0) { n_tty_receive_buf_closing(tty, cp, fp, la_count, true); - if (count > la_count) - n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false); + cp += la_count; + if (fp) + fp += la_count; + count -= la_count; + } + if (count > 0) + n_tty_receive_buf_closing(tty, cp, fp, count, false); } else { - if (la_count > 0) + if (la_count > 0) { n_tty_receive_buf_standard(tty, cp, fp, la_count, true); - if (count > la_count) - n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false); + cp 
+= la_count; + if (fp) + fp += la_count; + count -= la_count; + } + if (count > 0) + n_tty_receive_buf_standard(tty, cp, fp, count, false); flush_echoes(tty); if (tty->ops->flush_chars) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index ba9f4dc4e71d..fb809e32c6ae 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -55,6 +55,34 @@ #define DW_UART_QUIRK_SKIP_SET_RATE BIT(2) #define DW_UART_QUIRK_IS_DMA_FC BIT(3) #define DW_UART_QUIRK_APMC0D08 BIT(4) +#define DW_UART_QUIRK_CPR_VALUE BIT(5) + +struct dw8250_platform_data { + u8 usr_reg; + u32 cpr_value; + unsigned int quirks; +}; + +struct dw8250_data { + struct dw8250_port_data data; + const struct dw8250_platform_data *pdata; + + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct notifier_block clk_notifier; + struct work_struct clk_work; + struct reset_control *rst; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) +{ + return container_of(data, struct dw8250_data, data); +} static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb) { @@ -432,6 +460,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p) static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) { unsigned int quirks = data->pdata ? data->pdata->quirks : 0; + u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0; + + if (quirks & DW_UART_QUIRK_CPR_VALUE) + data->data.cpr_value = cpr_value; #ifdef CONFIG_64BIT if (quirks & DW_UART_QUIRK_OCTEON) { @@ -714,8 +746,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = { static const struct dw8250_platform_data dw8250_renesas_rzn1_data = { .usr_reg = DW_UART_USR, - .cpr_val = 0x00012f32, - .quirks = DW_UART_QUIRK_IS_DMA_FC, + .cpr_value = 0x00012f32, + .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC, }; static const struct dw8250_platform_data dw8250_starfive_jh7100_data = { diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c index 3e33ddf7bc80..5a2520943dfd 100644 --- a/drivers/tty/serial/8250/8250_dwlib.c +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = { void dw8250_setup_port(struct uart_port *p) { struct dw8250_port_data *pd = p->private_data; - struct dw8250_data *data = to_dw8250_data(pd); struct uart_8250_port *up = up_to_u8250p(p); u32 reg, old_dlf; @@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p) reg = dw8250_readl_ext(p, DW_UART_CPR); if (!reg) { - reg = data->pdata->cpr_val; + reg = pd->cpr_value; dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg); } if (!reg) diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h index f13e91f2cace..7dd2a8e7b780 100644 --- a/drivers/tty/serial/8250/8250_dwlib.h +++ b/drivers/tty/serial/8250/8250_dwlib.h @@ -2,15 +2,10 @@ /* Synopsys DesignWare 8250 library header file. 
*/ #include <linux/io.h> -#include <linux/notifier.h> #include <linux/types.h> -#include <linux/workqueue.h> #include "8250.h" -struct clk; -struct reset_control; - struct dw8250_port_data { /* Port properties */ int line; @@ -19,42 +14,16 @@ struct dw8250_port_data { struct uart_8250_dma dma; /* Hardware configuration */ + u32 cpr_value; u8 dlf_size; /* RS485 variables */ bool hw_rs485_support; }; -struct dw8250_platform_data { - u8 usr_reg; - u32 cpr_val; - unsigned int quirks; -}; - -struct dw8250_data { - struct dw8250_port_data data; - const struct dw8250_platform_data *pdata; - - int msr_mask_on; - int msr_mask_off; - struct clk *clk; - struct clk *pclk; - struct notifier_block clk_notifier; - struct work_struct clk_work; - struct reset_control *rst; - - unsigned int skip_autocfg:1; - unsigned int uart_16550_compatible:1; -}; - void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old); void dw8250_setup_port(struct uart_port *p); -static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) -{ - return container_of(data, struct dw8250_data, data); -} - static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) { if (p->iotype == UPIO_MEM32BE) diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c index f1a51b00b1b9..ba96fa913e7f 100644 --- a/drivers/tty/serial/8250/8250_pxa.c +++ b/drivers/tty/serial/8250/8250_pxa.c @@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev) uart.port.iotype = UPIO_MEM32; uart.port.regshift = 2; uart.port.fifosize = 64; + uart.tx_loadsz = 32; uart.dl_write = serial_pxa_dl_write; ret = serial8250_register_8250_port(&uart); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 4fdd7857ef4d..28e4beeabf8f 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1023,8 +1023,9 @@ config SERIAL_SCCNXP_CONSOLE help Support for console on SCCNXP serial ports. 
-config SERIAL_SC16IS7XX_CORE +config SERIAL_SC16IS7XX tristate "NXP SC16IS7xx UART support" + depends on SPI_MASTER || I2C select SERIAL_CORE select SERIAL_SC16IS7XX_SPI if SPI_MASTER select SERIAL_SC16IS7XX_I2C if I2C diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index faa45f2b8bb0..6ff74f0a9530 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -75,7 +75,7 @@ obj-$(CONFIG_SERIAL_SA1100) += sa1100.o obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o -obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o +obj-$(CONFIG_SERIAL_SC16IS7XX) += sc16is7xx.o obj-$(CONFIG_SERIAL_SC16IS7XX_SPI) += sc16is7xx_spi.o obj-$(CONFIG_SERIAL_SC16IS7XX_I2C) += sc16is7xx_i2c.o obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 2c1a0254d3f4..0c4d60976663 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -622,7 +622,7 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count) return -EL3HLT; port = uart_port_lock(state, flags); - if (WARN_ON_ONCE(!state->port.xmit_buf)) { + if (!state->port.xmit_buf) { uart_port_unlock(port, flags); return 0; } diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c index 91a338d3cb34..d35f1d24156c 100644 --- a/drivers/tty/serial/serial_port.c +++ b/drivers/tty/serial/serial_port.c @@ -64,6 +64,13 @@ static int serial_port_runtime_suspend(struct device *dev) if (port->flags & UPF_DEAD) return 0; + /* + * Nothing to do on pm_runtime_force_suspend(), see + * DEFINE_RUNTIME_DEV_PM_OPS. + */ + if (!pm_runtime_enabled(dev)) + return 0; + uart_port_lock_irqsave(port, &flags); if (!port_dev->tx_enabled) { uart_port_unlock_irqrestore(port, flags); diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 005d63ab1f44..8944548c30fa 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -634,20 +634,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; unsigned long flags; - int err = FAILED; + int err; if (!ufshcd_cmd_inflight(lrbp->cmd)) { dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n", __func__, tag); - goto out; + return FAILED; } /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n", __func__, tag); - goto out; + return FAILED; } hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); @@ -659,7 +659,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) */ dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n", __func__, hwq->id, tag); - goto out; + return FAILED; } /* @@ -667,18 +667,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) * in the completion queue either. Query the device to see if * the command is being processed in the device. 
*/ - if (ufshcd_try_to_abort_task(hba, tag)) { + err = ufshcd_try_to_abort_task(hba, tag); + if (err) { dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err); lrbp->req_abort_skip = true; - goto out; + return FAILED; } - err = SUCCESS; spin_lock_irqsave(&hwq->cq_lock, flags); if (ufshcd_cmd_inflight(lrbp->cmd)) ufshcd_release_scsi_cmd(hba, lrbp); spin_unlock_irqrestore(&hwq->cq_lock, flags); -out: - return err; + return SUCCESS; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 0cf07194bbe8..e5e9da61f15d 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1366,7 +1366,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) * make sure that there are no outstanding requests when * clock scaling is in progress */ - ufshcd_scsi_block_requests(hba); + blk_mq_quiesce_tagset(&hba->host->tag_set); mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); @@ -1375,7 +1375,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) ret = -EBUSY; up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); goto out; } @@ -1396,7 +1396,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); } diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 3a9a0dd4be70..949eca0adebe 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_FSL_USB2) += host/ obj-$(CONFIG_USB_FOTG210_HCD) += host/ obj-$(CONFIG_USB_MAX3421_HCD) += host/ +obj-$(CONFIG_USB_XEN_HCD) += host/ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index bada13f704b6..835bf2428dc6 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1084,6 +1084,10 @@ static int ci_hdrc_probe(struct platform_device *pdev) return -ENODEV; } + ret = ci_ulpi_init(ci); + if (ret) + return ret; + if (ci->platdata->phy) { ci->phy = ci->platdata->phy; } else if (ci->platdata->usb_phy) { @@ -1138,10 +1142,6 @@ static int ci_hdrc_probe(struct platform_device *pdev) goto ulpi_exit; } - ret = ci_ulpi_init(ci); - if (ret) - return ret; - ci->hw_bank.phys = res->start; ci->irq = platform_get_irq(pdev, 0); diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c index 89fb51e2c3de..dfec07e8ae1d 100644 --- a/drivers/usb/chipidea/ulpi.c +++ b/drivers/usb/chipidea/ulpi.c @@ -68,6 +68,11 @@ int ci_ulpi_init(struct ci_hdrc *ci) if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI) return 0; + /* + * Set PORTSC correctly so we can read/write ULPI registers for + * identification purposes + */ + hw_phymode_configure(ci); ci->ulpi_ops.read = ci_ulpi_read; ci->ulpi_ops.write = ci_ulpi_write; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index c553decb5461..6830be4419e2 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb) dev_err(&desc->intf->dev, "Stall on int endpoint\n"); goto sw; /* halt is cleared in work */ default: - dev_err(&desc->intf->dev, + dev_err_ratelimited(&desc->intf->dev, "nonzero urb status received: %d\n", status); break; } } if (urb->actual_length < sizeof(struct usb_cdc_notification)) { - 
dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n", + dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n", urb->actual_length); goto exit; } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index e3366f4d82b9..1ff7d901fede 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb) struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); struct usb_anchor *anchor = urb->anchor; int status = urb->unlinked; + unsigned long flags; urb->hcpriv = NULL; if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && @@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb) /* pass ownership to the completion handler */ urb->status = status; /* - * This function can be called in task context inside another remote - * coverage collection section, but kcov doesn't support that kind of - * recursion yet. Only collect coverage in softirq context for now. + * Only collect coverage in the softirq context and disable interrupts + * to avoid scenarios with nested remote coverage collection sections + * that KCOV does not support. + * See the comment next to kcov_remote_start_usb_softirq() for details. */ - kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); + flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); urb->complete(urb); - kcov_remote_stop_softirq(); + kcov_remote_stop_softirq(flags); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c040d816e626..05881153883e 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -36,6 +36,7 @@ #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_EJ168 0x7023 +#define PCI_DEVICE_ID_EJ188 0x7052 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -395,6 +396,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_RESET_ON_RESUME; xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ188) { + xhci->quirks |= XHCI_RESET_ON_RESUME; + xhci->quirks |= XHCI_BROKEN_STREAMS; + } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { xhci->quirks |= XHCI_ZERO_64B_REGS; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9e90d2952760..fd0cde3d1569 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1031,13 +1031,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) break; case TD_DIRTY: /* TD is cached, clear it */ case TD_HALTED: + case TD_CLEARING_CACHE_DEFERRED: + if (cached_td) { + if (cached_td->urb->stream_id != td->urb->stream_id) { + /* Multiple streams case, defer move dq */ + xhci_dbg(xhci, + "Move dq deferred: stream %u URB %p\n", + td->urb->stream_id, td->urb); + td->cancel_status = TD_CLEARING_CACHE_DEFERRED; + break; + } + + /* Should never happen, but clear the TD if it does */ + xhci_warn(xhci, + "Found multiple active URBs %p and %p in stream %u?\n", + td->urb, cached_td->urb, + td->urb->stream_id); + td_to_noop(xhci, ring, cached_td, false); + cached_td->cancel_status = TD_CLEARED; + } + td->cancel_status = TD_CLEARING_CACHE; - if (cached_td) - /* FIXME stream case, several stopped rings */ - xhci_dbg(xhci, - "Move dq past stream %u URB %p instead of stream %u URB %p\n", - td->urb->stream_id, td->urb, - cached_td->urb->stream_id, cached_td->urb); 
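/*
 * Sketch of the calling convention after the hcd.c change above: the softirq
 * helper now returns the flags value it saved when disabling interrupts, and
 * the caller hands that value back when closing the remote coverage section,
 * so nested sections cannot clobber each other's state. bus_number is
 * illustrative.
 */
unsigned long flags;

flags = kcov_remote_start_usb_softirq(bus_number);
/* ... run the URB completion handler ... */
kcov_remote_stop_softirq(flags);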
cached_td = td; break; } @@ -1057,10 +1071,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) if (err) { /* Failed to move past cached td, just set cached TDs to no-op */ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { - if (td->cancel_status != TD_CLEARING_CACHE) + /* + * Deferred TDs need to have the deq pointer set after the above command + * completes, so if that failed we just give up on all of them (and + * complain loudly since this could cause issues due to caching). + */ + if (td->cancel_status != TD_CLEARING_CACHE && + td->cancel_status != TD_CLEARING_CACHE_DEFERRED) continue; - xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", - td->urb); + xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", + td->urb); td_to_noop(xhci, ring, td, false); td->cancel_status = TD_CLEARED; } @@ -1346,6 +1366,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; struct xhci_td *td, *tmp_td; + bool deferred = false; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); @@ -1432,6 +1453,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", __func__, td->urb); xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); + } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) { + deferred = true; } else { xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", __func__, td->urb, td->cancel_status); @@ -1441,8 +1464,17 @@ cleanup: ep->ep_state &= ~SET_DEQ_PENDING; ep->queued_deq_seg = NULL; ep->queued_deq_ptr = NULL; - /* Restart any rings with pending URBs */ - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + + if (deferred) { + /* We have more streams to clear */ + xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n", + __func__); + xhci_invalidate_cancelled_tds(ep); + } else { + /* Restart any rings with pending URBs */ + xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + } } static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, @@ -2524,9 +2556,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, goto finish_td; case COMP_STOPPED_LENGTH_INVALID: /* stopped on ep trb with invalid length, exclude it */ - ep_trb_len = 0; - remaining = 0; - break; + td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb); + goto finish_td; case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || (ep->err_count++ > MAX_SOFT_RETRY) || diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 30415158ed3c..78d014c4d884 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status { TD_DIRTY = 0, TD_HALTED, TD_CLEARING_CACHE, + TD_CLEARING_CACHE_DEFERRED, TD_CLEARED, }; diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 115f05a6201a..40d34cc28344 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -105,6 +105,8 @@ struct alauda_info { unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ + + bool media_initialized; }; #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( 
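/*
 * Greatly simplified sketch of the stream-aware cancellation added to
 * xhci-ring.c above (it ignores the TD_DIRTY/TD_HALTED distinctions and all
 * error handling): only one Set TR Dequeue command can target an endpoint at
 * a time, so the first cancelled TD per pass gets TD_CLEARING_CACHE and TDs
 * on other stream rings are parked as TD_CLEARING_CACHE_DEFERRED. The Set
 * Dequeue completion handler then calls xhci_invalidate_cancelled_tds()
 * again until no deferred TDs remain.
 */
list_for_each_entry(td, &ep->cancelled_td_list, cancelled_td_list) {
	if (!cached_td) {
		td->cancel_status = TD_CLEARING_CACHE;
		cached_td = td;
	} else if (td->urb->stream_id != cached_td->urb->stream_id) {
		/* A different stream ring: move its dequeue pointer next pass. */
		td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
	}
}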
((u16)(msb))<<8 ) ) @@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us) } /* Check for media change */ - if (status[0] & 0x08) { + if (status[0] & 0x08 || !info->media_initialized) { usb_stor_dbg(us, "Media change detected\n"); alauda_free_maps(&MEDIA_INFO(us)); - alauda_init_media(us); - + rc = alauda_init_media(us); + if (rc == USB_STOR_TRANSPORT_GOOD) + info->media_initialized = true; info->sense_key = UNIT_ATTENTION; info->sense_asc = 0x28; info->sense_ascq = 0x00; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 8a1af08f71b6..5d4da962acc8 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port) memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps); caps.role = TYPEC_SOURCE; - if (cap) + if (cap) { usb_power_delivery_unregister_capabilities(cap); + port->partner_source_caps = NULL; + } cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps); if (IS_ERR(cap)) @@ -6172,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port) port->tcpc->set_bist_data(port->tcpc, false); switch (port->state) { + case TOGGLING: case ERROR_RECOVERY: case PORT_RESET: case PORT_RESET_WAIT_OFF: diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index cb52e7b0a2c5..2cc7aedd490f 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -153,8 +153,13 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) } if (cci & UCSI_CCI_ERROR) { - if (cmd == UCSI_GET_ERROR_STATUS) + if (cmd == UCSI_GET_ERROR_STATUS) { + ret = ucsi_acknowledge(ucsi, false); + if (ret) + return ret; + return -EIO; + } return ucsi_read_error(ucsi); } diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index f7546bb488c3..985a880e86da 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -14,7 +14,7 @@ #include <linux/soc/qcom/pmic_glink.h> #include "ucsi.h" -#define PMIC_GLINK_MAX_PORTS 2 +#define PMIC_GLINK_MAX_PORTS 3 #define UCSI_BUF_SIZE 48 diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c index e75da0a70d1f..bb1817bd4ff3 100644 --- a/drivers/vfio/device_cdev.c +++ b/drivers/vfio/device_cdev.c @@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep) filep->private_data = df; + /* + * Use the pseudo fs inode on the device to link all mmaps + * to the same address space, allowing us to unmap all vmas + * associated to this device using unmap_mapping_range(). + */ + filep->f_mapping = device->inode->i_mapping; + return 0; err_put_registration: diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c index 610a429c6191..ded364588d29 100644 --- a/drivers/vfio/group.c +++ b/drivers/vfio/group.c @@ -286,6 +286,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device) */ filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE); + /* + * Use the pseudo fs inode on the device to link all mmaps + * to the same address space, allowing us to unmap all vmas + * associated to this device using unmap_mapping_range(). 
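/*
 * Sketch of the idea spelled out in the comment above: every open file for a
 * given vfio device points its f_mapping at one per-device inode, so all
 * VMAs created by mmap() share a single address_space. Revoking user access
 * then needs only one call; the 0/0 range below is illustrative and means
 * "from the start to the end of the mapping".
 */
/* At open time (cdev and group paths alike): */
filep->f_mapping = device->inode->i_mapping;

/* Later, to tear down every user mapping of the device at once: */
unmap_mapping_range(device->inode->i_mapping, 0, 0, true);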
+ */ + filep->f_mapping = device->inode->i_mapping; + if (device->group->type == VFIO_NO_IOMMU) dev_warn(device->dev, "vfio-noiommu device opened by user " "(%s:%d)\n", current->comm, task_pid_nr(current)); diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 80cae87fff36..987c7921affa 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1610,100 +1610,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu } EXPORT_SYMBOL_GPL(vfio_pci_core_write); -/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ -static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try) +static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev) { - struct vfio_pci_mmap_vma *mmap_vma, *tmp; + struct vfio_device *core_vdev = &vdev->vdev; + loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX); + loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX); + loff_t len = end - start; - /* - * Lock ordering: - * vma_lock is nested under mmap_lock for vm_ops callback paths. - * The memory_lock semaphore is used by both code paths calling - * into this function to zap vmas and the vm_ops.fault callback - * to protect the memory enable state of the device. - * - * When zapping vmas we need to maintain the mmap_lock => vma_lock - * ordering, which requires using vma_lock to walk vma_list to - * acquire an mm, then dropping vma_lock to get the mmap_lock and - * reacquiring vma_lock. This logic is derived from similar - * requirements in uverbs_user_mmap_disassociate(). - * - * mmap_lock must always be the top-level lock when it is taken. - * Therefore we can only hold the memory_lock write lock when - * vma_list is empty, as we'd need to take mmap_lock to clear - * entries. vma_list can only be guaranteed empty when holding - * vma_lock, thus memory_lock is nested under vma_lock. - * - * This enables the vm_ops.fault callback to acquire vma_lock, - * followed by memory_lock read lock, while already holding - * mmap_lock without risk of deadlock. 
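/*
 * Worked example of the offsets used by vfio_pci_zap_bars() above. vfio-pci
 * encodes the region index in the high bits of the file offset; with
 * VFIO_PCI_INDEX_TO_OFFSET(i) == (u64)i << VFIO_PCI_OFFSET_SHIFT, a shift of
 * 40 (the value in current kernels), and VFIO_PCI_ROM_REGION_INDEX == 6:
 *
 *	start = VFIO_PCI_INDEX_TO_OFFSET(BAR0) = 0ULL << 40 = 0
 *	end   = VFIO_PCI_INDEX_TO_OFFSET(ROM)  = 6ULL << 40
 *	len   = end - start                    = 6ULL << 40
 *
 * so unmap_mapping_range(mapping, start, len, true) drops exactly the user
 * mappings that fall inside the six BAR regions.
 */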
- */ - while (1) { - struct mm_struct *mm = NULL; - - if (try) { - if (!mutex_trylock(&vdev->vma_lock)) - return 0; - } else { - mutex_lock(&vdev->vma_lock); - } - while (!list_empty(&vdev->vma_list)) { - mmap_vma = list_first_entry(&vdev->vma_list, - struct vfio_pci_mmap_vma, - vma_next); - mm = mmap_vma->vma->vm_mm; - if (mmget_not_zero(mm)) - break; - - list_del(&mmap_vma->vma_next); - kfree(mmap_vma); - mm = NULL; - } - if (!mm) - return 1; - mutex_unlock(&vdev->vma_lock); - - if (try) { - if (!mmap_read_trylock(mm)) { - mmput(mm); - return 0; - } - } else { - mmap_read_lock(mm); - } - if (try) { - if (!mutex_trylock(&vdev->vma_lock)) { - mmap_read_unlock(mm); - mmput(mm); - return 0; - } - } else { - mutex_lock(&vdev->vma_lock); - } - list_for_each_entry_safe(mmap_vma, tmp, - &vdev->vma_list, vma_next) { - struct vm_area_struct *vma = mmap_vma->vma; - - if (vma->vm_mm != mm) - continue; - - list_del(&mmap_vma->vma_next); - kfree(mmap_vma); - - zap_vma_ptes(vma, vma->vm_start, - vma->vm_end - vma->vm_start); - } - mutex_unlock(&vdev->vma_lock); - mmap_read_unlock(mm); - mmput(mm); - } + unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true); } void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev) { - vfio_pci_zap_and_vma_lock(vdev, false); down_write(&vdev->memory_lock); - mutex_unlock(&vdev->vma_lock); + vfio_pci_zap_bars(vdev); } u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev) @@ -1725,99 +1645,56 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c up_write(&vdev->memory_lock); } -/* Caller holds vma_lock */ -static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev, - struct vm_area_struct *vma) -{ - struct vfio_pci_mmap_vma *mmap_vma; - - mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT); - if (!mmap_vma) - return -ENOMEM; - - mmap_vma->vma = vma; - list_add(&mmap_vma->vma_next, &vdev->vma_list); - - return 0; -} - -/* - * Zap mmaps on open so that we can fault them in on access and therefore - * our vma_list only tracks mappings accessed since last zap. - */ -static void vfio_pci_mmap_open(struct vm_area_struct *vma) -{ - zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); -} - -static void vfio_pci_mmap_close(struct vm_area_struct *vma) +static unsigned long vma_to_pfn(struct vm_area_struct *vma) { struct vfio_pci_core_device *vdev = vma->vm_private_data; - struct vfio_pci_mmap_vma *mmap_vma; + int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + u64 pgoff; - mutex_lock(&vdev->vma_lock); - list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { - if (mmap_vma->vma == vma) { - list_del(&mmap_vma->vma_next); - kfree(mmap_vma); - break; - } - } - mutex_unlock(&vdev->vma_lock); + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + + return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff; } static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct vfio_pci_core_device *vdev = vma->vm_private_data; - struct vfio_pci_mmap_vma *mmap_vma; - vm_fault_t ret = VM_FAULT_NOPAGE; + unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff; + unsigned long addr = vma->vm_start; + vm_fault_t ret = VM_FAULT_SIGBUS; + + pfn = vma_to_pfn(vma); - mutex_lock(&vdev->vma_lock); down_read(&vdev->memory_lock); - /* - * Memory region cannot be accessed if the low power feature is engaged - * or memory access is disabled. 
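/*
 * Worked example for vma_to_pfn() above, assuming 4 KiB pages (PAGE_SHIFT ==
 * 12) and VFIO_PCI_OFFSET_SHIFT == 40, so the region index sits at bit 28 of
 * vm_pgoff. Say userspace maps BAR2 starting three pages into the BAR:
 *
 *	offset   = (2ULL << 40) + (3 << 12)
 *	vm_pgoff = offset >> 12          = (2 << 28) | 3
 *	index    = vm_pgoff >> 28        = 2
 *	pgoff    = vm_pgoff & 0x0fffffff = 3
 *	pfn      = (pci_resource_start(pdev, 2) >> 12) + 3
 */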
- */ - if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) { - ret = VM_FAULT_SIGBUS; - goto up_out; - } + if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) + goto out_unlock; + + ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff); + if (ret & VM_FAULT_ERROR) + goto out_unlock; /* - * We populate the whole vma on fault, so we need to test whether - * the vma has already been mapped, such as for concurrent faults - * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if - * we ask it to fill the same range again. + * Pre-fault the remainder of the vma, abort further insertions and + * supress error if fault is encountered during pre-fault. */ - list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { - if (mmap_vma->vma == vma) - goto up_out; - } - - if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) { - ret = VM_FAULT_SIGBUS; - zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); - goto up_out; - } + for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) { + if (addr == vmf->address) + continue; - if (__vfio_pci_add_vma(vdev, vma)) { - ret = VM_FAULT_OOM; - zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR) + break; } -up_out: +out_unlock: up_read(&vdev->memory_lock); - mutex_unlock(&vdev->vma_lock); + return ret; } static const struct vm_operations_struct vfio_pci_mmap_ops = { - .open = vfio_pci_mmap_open, - .close = vfio_pci_mmap_close, .fault = vfio_pci_mmap_fault, }; @@ -1880,11 +1757,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma vma->vm_private_data = vdev; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); /* - * See remap_pfn_range(), called from vfio_pci_fault() but we can't - * change vm_flags within the fault handler. Set them now. + * Set vm_flags now, they should not be changed in the fault handler. + * We want the same flags and page protection (decrypted above) as + * io_remap_pfn_range() would set. * * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64, * allowing KVM stage 2 device mapping attributes to use Normal-NC @@ -2202,8 +2080,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev) mutex_init(&vdev->ioeventfds_lock); INIT_LIST_HEAD(&vdev->dummy_resources_list); INIT_LIST_HEAD(&vdev->ioeventfds_list); - mutex_init(&vdev->vma_lock); - INIT_LIST_HEAD(&vdev->vma_list); INIT_LIST_HEAD(&vdev->sriov_pfs_item); init_rwsem(&vdev->memory_lock); xa_init(&vdev->ctx); @@ -2219,7 +2095,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev) mutex_destroy(&vdev->igate); mutex_destroy(&vdev->ioeventfds_lock); - mutex_destroy(&vdev->vma_lock); kfree(vdev->region); kfree(vdev->pm_save); } @@ -2497,26 +2372,15 @@ unwind: return ret; } -/* - * We need to get memory_lock for each device, but devices can share mmap_lock, - * therefore we need to zap and hold the vma_lock for each device, and only then - * get each memory_lock. 
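/*
 * Minimal sketch of the fault-time population pattern adopted above
 * (first_pfn_of_vma() is a hypothetical stand-in for vma_to_pfn()): insert
 * the faulting page first so its result decides the return value, then
 * opportunistically pre-fault the rest of the VMA, giving up silently on the
 * first error since a later access will simply fault again.
 */
static vm_fault_t example_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn = first_pfn_of_vma(vma);	/* hypothetical helper */
	unsigned long addr;
	vm_fault_t ret;

	ret = vmf_insert_pfn(vma, vmf->address, pfn + (vmf->pgoff - vma->vm_pgoff));
	if (ret & VM_FAULT_ERROR)
		return ret;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
		if (addr == vmf->address)
			continue;
		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
			break;
	}

	return ret;
}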
- */ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, struct vfio_pci_group_info *groups, struct iommufd_ctx *iommufd_ctx) { - struct vfio_pci_core_device *cur_mem; - struct vfio_pci_core_device *cur_vma; - struct vfio_pci_core_device *cur; + struct vfio_pci_core_device *vdev; struct pci_dev *pdev; - bool is_mem = true; int ret; mutex_lock(&dev_set->lock); - cur_mem = list_first_entry(&dev_set->device_list, - struct vfio_pci_core_device, - vdev.dev_set_list); pdev = vfio_pci_dev_set_resettable(dev_set); if (!pdev) { @@ -2533,7 +2397,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, if (ret) goto err_unlock; - list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) { + list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) { bool owned; /* @@ -2557,38 +2421,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, * Otherwise, reset is not allowed. */ if (iommufd_ctx) { - int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev, + int devid = vfio_iommufd_get_dev_id(&vdev->vdev, iommufd_ctx); owned = (devid > 0 || devid == -ENOENT); } else { - owned = vfio_dev_in_groups(&cur_vma->vdev, groups); + owned = vfio_dev_in_groups(&vdev->vdev, groups); } if (!owned) { ret = -EINVAL; - goto err_undo; + break; } /* - * Locking multiple devices is prone to deadlock, runaway and - * unwind if we hit contention. + * Take the memory write lock for each device and zap BAR + * mappings to prevent the user accessing the device while in + * reset. Locking multiple devices is prone to deadlock, + * runaway and unwind if we hit contention. */ - if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) { + if (!down_write_trylock(&vdev->memory_lock)) { ret = -EBUSY; - goto err_undo; + break; } + + vfio_pci_zap_bars(vdev); } - cur_vma = NULL; - list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) { - if (!down_write_trylock(&cur_mem->memory_lock)) { - ret = -EBUSY; - goto err_undo; - } - mutex_unlock(&cur_mem->vma_lock); + if (!list_entry_is_head(vdev, + &dev_set->device_list, vdev.dev_set_list)) { + vdev = list_prev_entry(vdev, vdev.dev_set_list); + goto err_undo; } - cur_mem = NULL; /* * The pci_reset_bus() will reset all the devices in the bus. @@ -2599,25 +2463,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, * cause the PCI config space reset without restoring the original * state (saved locally in 'vdev->pm_save'). 
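/*
 * Sketch of the trylock/unwind pattern used by the reworked
 * vfio_pci_dev_set_hot_reset() above (the bus reset, which only runs once
 * every lock is held, is elided): lock each device's memory_lock in list
 * order; on contention, break out, step back to the last device that was
 * actually locked, and release in reverse from there. When every trylock
 * succeeds, the unwind instead starts from the list tail.
 */
list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
	if (!down_write_trylock(&vdev->memory_lock)) {
		ret = -EBUSY;
		break;
	}
}

if (!list_entry_is_head(vdev, &dev_set->device_list, vdev.dev_set_list))
	vdev = list_prev_entry(vdev, vdev.dev_set_list);	/* last one locked */
else
	vdev = list_last_entry(&dev_set->device_list,
			       struct vfio_pci_core_device, vdev.dev_set_list);

list_for_each_entry_from_reverse(vdev, &dev_set->device_list, vdev.dev_set_list)
	up_write(&vdev->memory_lock);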
*/ - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) - vfio_pci_set_power_state(cur, PCI_D0); + list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) + vfio_pci_set_power_state(vdev, PCI_D0); ret = pci_reset_bus(pdev); + vdev = list_last_entry(&dev_set->device_list, + struct vfio_pci_core_device, vdev.dev_set_list); + err_undo: - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) { - if (cur == cur_mem) - is_mem = false; - if (cur == cur_vma) - break; - if (is_mem) - up_write(&cur->memory_lock); - else - mutex_unlock(&cur->vma_lock); - } + list_for_each_entry_from_reverse(vdev, &dev_set->device_list, + vdev.dev_set_list) + up_write(&vdev->memory_lock); + + list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) + pm_runtime_put(&vdev->pdev->dev); - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) - pm_runtime_put(&cur->pdev->dev); err_unlock: mutex_unlock(&dev_set->lock); return ret; diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index e97d796a54fb..a5a62d9d963f 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -22,8 +22,10 @@ #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/module.h> +#include <linux/mount.h> #include <linux/mutex.h> #include <linux/pci.h> +#include <linux/pseudo_fs.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/slab.h> @@ -43,9 +45,13 @@ #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" #define DRIVER_DESC "VFIO - User Level meta-driver" +#define VFIO_MAGIC 0x5646494f /* "VFIO" */ + static struct vfio { struct class *device_class; struct ida device_ida; + struct vfsmount *vfs_mount; + int fs_count; } vfio; #ifdef CONFIG_VFIO_NOIOMMU @@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev) if (device->ops->release) device->ops->release(device); + iput(device->inode); + simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); kvfree(device); } @@ -228,6 +236,34 @@ out_free: } EXPORT_SYMBOL_GPL(_vfio_alloc_device); +static int vfio_fs_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM; +} + +static struct file_system_type vfio_fs_type = { + .name = "vfio", + .owner = THIS_MODULE, + .init_fs_context = vfio_fs_init_fs_context, + .kill_sb = kill_anon_super, +}; + +static struct inode *vfio_fs_inode_new(void) +{ + struct inode *inode; + int ret; + + ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count); + if (ret) + return ERR_PTR(ret); + + inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb); + if (IS_ERR(inode)) + simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); + + return inode; +} + /* * Initialize a vfio_device so it can be registered to vfio core. */ @@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev, init_completion(&device->comp); device->dev = dev; device->ops = ops; + device->inode = vfio_fs_inode_new(); + if (IS_ERR(device->inode)) { + ret = PTR_ERR(device->inode); + goto out_inode; + } if (ops->init) { ret = ops->init(device); @@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev, return 0; out_uninit: + iput(device->inode); + simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); +out_inode: vfio_release_device_set(device); ida_free(&vfio.device_ida, device->index); return ret; |
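/*
 * Sketch of the pairing behind vfio_fs_inode_new() above: each device inode
 * holds one pin on an internal mount of the "vfio" pseudo filesystem, taken
 * at init time and dropped again from vfio_device_release(). Error handling
 * is trimmed to show the pairing only.
 */
/* Device init: pin the mount, then allocate the per-device inode. */
simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
device->inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);

/* Device release: drop the inode, then the mount pin. */
iput(device->inode);
simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);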