author | David S. Miller <davem@davemloft.net> | 2020-05-06 22:10:13 -0700
committer | David S. Miller <davem@davemloft.net> | 2020-05-06 22:10:13 -0700
commit | 3793faad7b5b730941b2efbc252d14374b60843a (patch)
tree | e1bea43727d87f8fd30ca169f465a3591f15d63b /drivers
parent | ae1804de93f6f1626906567ae7deec8e0111259d (diff)
parent | a811c1fa0a02c062555b54651065899437bacdbe (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts were all overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
231 files changed, 2148 insertions, 1207 deletions
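The first sizeable change in the diff below is to drivers/block/virtio_blk.c, which closes a use-after-free between block-device open/release and hot-unplug: the driver gains a vdev_mutex so open paths can see that virtblk_remove() has already cleared vblk->vdev, plus a reference count so that whoever drops the last reference (not remove() itself) frees the structure. The following is a minimal userspace sketch of that pattern only, assuming pthreads and C11 atomics stand in for the kernel's mutex and refcount_t; blk_open()/blk_remove() and the other names are illustrative, not the driver's API.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct blk {
	pthread_mutex_t vdev_mutex;   /* guards readers of ->vdev against remove() */
	void *vdev;                   /* NULL once the device is gone */
	atomic_int refs;              /* one ref for probe, one per open */
};

static void blk_put(struct blk *b)
{
	if (atomic_fetch_sub(&b->refs, 1) == 1) {    /* last reference frees */
		pthread_mutex_destroy(&b->vdev_mutex);
		free(b);
	}
}

/* open succeeds only while the device is still present */
static int blk_open(struct blk *b)
{
	int ret = 0;

	pthread_mutex_lock(&b->vdev_mutex);
	if (b->vdev)
		atomic_fetch_add(&b->refs, 1);
	else
		ret = -1;                            /* -ENXIO in the driver */
	pthread_mutex_unlock(&b->vdev_mutex);
	return ret;
}

static void blk_release(struct blk *b)
{
	blk_put(b);
}

/* hot-unplug: clear ->vdev under the mutex, then drop probe's reference */
static void blk_remove(struct blk *b)
{
	pthread_mutex_lock(&b->vdev_mutex);
	b->vdev = NULL;
	pthread_mutex_unlock(&b->vdev_mutex);
	blk_put(b);
}

int main(void)
{
	struct blk *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	pthread_mutex_init(&b->vdev_mutex, NULL);
	b->vdev = b;                                 /* any non-NULL token */
	atomic_init(&b->refs, 1);                    /* probe's reference */

	if (blk_open(b) == 0) {                      /* user still holds the disk open */
		blk_remove(b);                       /* device unplugged */
		blk_release(b);                      /* last close frees the object */
	} else {
		blk_remove(b);
	}
	return 0;
}
```

In the driver itself the same split appears as virtblk_get()/virtblk_put() plus the vdev_mutex taken in virtblk_open() and virtblk_getgeo().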
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index b2263ec67b43..5832bc10aca8 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state) end: if (result) { dev_warn(&device->dev, "Failed to change power state to %s\n", - acpi_power_state_string(state)); + acpi_power_state_string(target_state)); } else { device->power.state = target_state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] transitioned to %s\n", device->pnp.bus_id, - acpi_power_state_string(state))); + acpi_power_state_string(target_state))); } return result; diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c index ba9d30b28edc..a182e318bd09 100644 --- a/drivers/base/firmware_loader/fallback_table.c +++ b/drivers/base/firmware_loader/fallback_table.c @@ -45,5 +45,4 @@ struct ctl_table firmware_config_table[] = { }, { } }; -EXPORT_SYMBOL_GPL(firmware_config_table); #endif diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 93468b7c6701..9d21bf0f155e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -33,6 +33,15 @@ struct virtio_blk_vq { } ____cacheline_aligned_in_smp; struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; struct virtio_device *vdev; /* The disk structure for the kernel. */ @@ -44,6 +53,13 @@ struct virtio_blk { /* Process context for config space updates */ struct work_struct config_work; + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + /* What host tells us, plus 2 for header & tailer. 
*/ unsigned int sg_elems; @@ -295,10 +311,55 @@ out: return err; } +static void virtblk_get(struct virtio_blk *vblk) +{ + refcount_inc(&vblk->refs); +} + +static void virtblk_put(struct virtio_blk *vblk) +{ + if (refcount_dec_and_test(&vblk->refs)) { + ida_simple_remove(&vd_index_ida, vblk->index); + mutex_destroy(&vblk->vdev_mutex); + kfree(vblk); + } +} + +static int virtblk_open(struct block_device *bd, fmode_t mode) +{ + struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (vblk->vdev) + virtblk_get(vblk); + else + ret = -ENXIO; + + mutex_unlock(&vblk->vdev_mutex); + return ret; +} + +static void virtblk_release(struct gendisk *disk, fmode_t mode) +{ + struct virtio_blk *vblk = disk->private_data; + + virtblk_put(vblk); +} + /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (!vblk->vdev) { + ret = -ENXIO; + goto out; + } /* see if the host passed in geometry config */ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { @@ -314,11 +375,15 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } - return 0; +out: + mutex_unlock(&vblk->vdev_mutex); + return ret; } static const struct block_device_operations virtblk_fops = { .owner = THIS_MODULE, + .open = virtblk_open, + .release = virtblk_release, .getgeo = virtblk_getgeo, }; @@ -655,6 +720,10 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_free_index; } + /* This reference is dropped in virtblk_remove(). */ + refcount_set(&vblk->refs, 1); + mutex_init(&vblk->vdev_mutex); + vblk->vdev = vdev; vblk->sg_elems = sg_elems; @@ -820,8 +889,6 @@ out: static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; - int index = vblk->index; - int refc; /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); @@ -831,18 +898,21 @@ static void virtblk_remove(struct virtio_device *vdev) blk_mq_free_tag_set(&vblk->tag_set); + mutex_lock(&vblk->vdev_mutex); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); - refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref); + /* Virtqueues are stopped, nothing can use vblk->vdev anymore. 
*/ + vblk->vdev = NULL; + put_disk(vblk->disk); vdev->config->del_vqs(vdev); kfree(vblk->vqs); - kfree(vblk); - /* Only free device id if we don't have any users */ - if (refc == 1) - ida_simple_remove(&vd_index_ida, index); + mutex_unlock(&vblk->vdev_mutex); + + virtblk_put(vblk); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index 9dab190c49b0..aa13708c2bc3 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -44,6 +44,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses"); * @base: base port address of the IIO device */ struct quad8_iio { + struct mutex lock; struct counter_device counter; unsigned int fck_prescaler[QUAD8_NUM_COUNTERS]; unsigned int preset[QUAD8_NUM_COUNTERS]; @@ -123,6 +124,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev, /* Borrow XOR Carry effectively doubles count range */ *val = (borrow ^ carry) << 24; + mutex_lock(&priv->lock); + /* Reset Byte Pointer; transfer Counter to Output Latch */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, base_offset + 1); @@ -130,6 +133,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev, for (i = 0; i < 3; i++) *val |= (unsigned int)inb(base_offset) << (8 * i); + mutex_unlock(&priv->lock); + return IIO_VAL_INT; case IIO_CHAN_INFO_ENABLE: *val = priv->ab_enable[chan->channel]; @@ -160,6 +165,8 @@ static int quad8_write_raw(struct iio_dev *indio_dev, if ((unsigned int)val > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + /* Reset Byte Pointer */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); @@ -183,12 +190,16 @@ static int quad8_write_raw(struct iio_dev *indio_dev, /* Reset Error flag */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; case IIO_CHAN_INFO_ENABLE: /* only boolean values accepted */ if (val < 0 || val > 1) return -EINVAL; + mutex_lock(&priv->lock); + priv->ab_enable[chan->channel] = val; ior_cfg = val | priv->preset_enable[chan->channel] << 1; @@ -196,11 +207,18 @@ static int quad8_write_raw(struct iio_dev *indio_dev, /* Load I/O control configuration */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; case IIO_CHAN_INFO_SCALE: + mutex_lock(&priv->lock); + /* Quadrature scaling only available in quadrature mode */ - if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1)) + if (!priv->quadrature_mode[chan->channel] && + (val2 || val != 1)) { + mutex_unlock(&priv->lock); return -EINVAL; + } /* Only three gain states (1, 0.5, 0.25) */ if (val == 1 && !val2) @@ -214,11 +232,15 @@ static int quad8_write_raw(struct iio_dev *indio_dev, priv->quadrature_scale[chan->channel] = 2; break; default: + mutex_unlock(&priv->lock); return -EINVAL; } - else + else { + mutex_unlock(&priv->lock); return -EINVAL; + } + mutex_unlock(&priv->lock); return 0; } @@ -255,6 +277,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private, if (preset > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + priv->preset[chan->channel] = preset; /* Reset Byte Pointer */ @@ -264,6 +288,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private, for (i = 0; i < 3; i++) outb(preset >> (8 * i), base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -293,6 +319,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev, /* Preset enable is active low in Input/Output Control register */ preset_enable = !preset_enable; + mutex_lock(&priv->lock); + 
priv->preset_enable[chan->channel] = preset_enable; ior_cfg = priv->ab_enable[chan->channel] | @@ -301,6 +329,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev, /* Load I/O control configuration to Input / Output Control Register */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -358,6 +388,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev, unsigned int mode_cfg = cnt_mode << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + mutex_lock(&priv->lock); + priv->count_mode[chan->channel] = cnt_mode; /* Add quadrature mode configuration */ @@ -367,6 +399,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -394,19 +428,26 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int synchronous_mode) { struct quad8_iio *const priv = iio_priv(indio_dev); - const unsigned int idr_cfg = synchronous_mode | - priv->index_polarity[chan->channel] << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int idr_cfg = synchronous_mode; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->index_polarity[chan->channel] << 1; /* Index function must be non-synchronous in non-quadrature mode */ - if (synchronous_mode && !priv->quadrature_mode[chan->channel]) + if (synchronous_mode && !priv->quadrature_mode[chan->channel]) { + mutex_unlock(&priv->lock); return -EINVAL; + } priv->synchronous_mode[chan->channel] = synchronous_mode; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -434,8 +475,12 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int quadrature_mode) { struct quad8_iio *const priv = iio_priv(indio_dev); - unsigned int mode_cfg = priv->count_mode[chan->channel] << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int mode_cfg; + + mutex_lock(&priv->lock); + + mode_cfg = priv->count_mode[chan->channel] << 1; if (quadrature_mode) mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3; @@ -453,6 +498,8 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -480,15 +527,20 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int index_polarity) { struct quad8_iio *const priv = iio_priv(indio_dev); - const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] | - index_polarity << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int idr_cfg = index_polarity << 1; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->synchronous_mode[chan->channel]; priv->index_polarity[chan->channel] = index_polarity; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -589,7 +641,7 @@ static int quad8_signal_read(struct counter_device *counter, static int quad8_count_read(struct counter_device *counter, struct counter_count *count, unsigned long *val) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int 
base_offset = priv->base + 2 * count->id; unsigned int flags; unsigned int borrow; @@ -603,6 +655,8 @@ static int quad8_count_read(struct counter_device *counter, /* Borrow XOR Carry effectively doubles count range */ *val = (unsigned long)(borrow ^ carry) << 24; + mutex_lock(&priv->lock); + /* Reset Byte Pointer; transfer Counter to Output Latch */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, base_offset + 1); @@ -610,13 +664,15 @@ static int quad8_count_read(struct counter_device *counter, for (i = 0; i < 3; i++) *val |= (unsigned long)inb(base_offset) << (8 * i); + mutex_unlock(&priv->lock); + return 0; } static int quad8_count_write(struct counter_device *counter, struct counter_count *count, unsigned long val) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int base_offset = priv->base + 2 * count->id; int i; @@ -624,6 +680,8 @@ static int quad8_count_write(struct counter_device *counter, if (val > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + /* Reset Byte Pointer */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); @@ -647,6 +705,8 @@ static int quad8_count_write(struct counter_device *counter, /* Reset Error flag */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; } @@ -667,13 +727,13 @@ static enum counter_count_function quad8_count_functions_list[] = { static int quad8_function_get(struct counter_device *counter, struct counter_count *count, size_t *function) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int id = count->id; - const unsigned int quadrature_mode = priv->quadrature_mode[id]; - const unsigned int scale = priv->quadrature_scale[id]; - if (quadrature_mode) - switch (scale) { + mutex_lock(&priv->lock); + + if (priv->quadrature_mode[id]) + switch (priv->quadrature_scale[id]) { case 0: *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1; break; @@ -687,6 +747,8 @@ static int quad8_function_get(struct counter_device *counter, else *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION; + mutex_unlock(&priv->lock); + return 0; } @@ -697,10 +759,15 @@ static int quad8_function_set(struct counter_device *counter, const int id = count->id; unsigned int *const quadrature_mode = priv->quadrature_mode + id; unsigned int *const scale = priv->quadrature_scale + id; - unsigned int mode_cfg = priv->count_mode[id] << 1; unsigned int *const synchronous_mode = priv->synchronous_mode + id; - const unsigned int idr_cfg = priv->index_polarity[id] << 1; const int base_offset = priv->base + 2 * id + 1; + unsigned int mode_cfg; + unsigned int idr_cfg; + + mutex_lock(&priv->lock); + + mode_cfg = priv->count_mode[id] << 1; + idr_cfg = priv->index_polarity[id] << 1; if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) { *quadrature_mode = 0; @@ -736,6 +803,8 @@ static int quad8_function_set(struct counter_device *counter, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -852,15 +921,20 @@ static int quad8_index_polarity_set(struct counter_device *counter, { struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id - 16; - const unsigned int idr_cfg = priv->synchronous_mode[channel_id] | - index_polarity << 1; const int base_offset = priv->base + 2 * channel_id + 1; + unsigned int idr_cfg = index_polarity << 1; + + mutex_lock(&priv->lock); + + idr_cfg |= 
priv->synchronous_mode[channel_id]; priv->index_polarity[channel_id] = index_polarity; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -887,19 +961,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter, { struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id - 16; - const unsigned int idr_cfg = synchronous_mode | - priv->index_polarity[channel_id] << 1; const int base_offset = priv->base + 2 * channel_id + 1; + unsigned int idr_cfg = synchronous_mode; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->index_polarity[channel_id] << 1; /* Index function must be non-synchronous in non-quadrature mode */ - if (synchronous_mode && !priv->quadrature_mode[channel_id]) + if (synchronous_mode && !priv->quadrature_mode[channel_id]) { + mutex_unlock(&priv->lock); return -EINVAL; + } priv->synchronous_mode[channel_id] = synchronous_mode; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -964,6 +1045,8 @@ static int quad8_count_mode_set(struct counter_device *counter, break; } + mutex_lock(&priv->lock); + priv->count_mode[count->id] = cnt_mode; /* Set count mode configuration value */ @@ -976,6 +1059,8 @@ static int quad8_count_mode_set(struct counter_device *counter, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -1017,6 +1102,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter, if (err) return err; + mutex_lock(&priv->lock); + priv->ab_enable[count->id] = ab_enable; ior_cfg = ab_enable | priv->preset_enable[count->id] << 1; @@ -1024,6 +1111,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter, /* Load I/O control configuration */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); + mutex_unlock(&priv->lock); + return len; } @@ -1052,14 +1141,28 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter, return sprintf(buf, "%u\n", priv->preset[count->id]); } +static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id, + unsigned int preset) +{ + const unsigned int base_offset = quad8iio->base + 2 * id; + int i; + + quad8iio->preset[id] = preset; + + /* Reset Byte Pointer */ + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + + /* Set Preset Register */ + for (i = 0; i < 3; i++) + outb(preset >> (8 * i), base_offset); +} + static ssize_t quad8_count_preset_write(struct counter_device *counter, struct counter_count *count, void *private, const char *buf, size_t len) { struct quad8_iio *const priv = counter->priv; - const int base_offset = priv->base + 2 * count->id; unsigned int preset; int ret; - int i; ret = kstrtouint(buf, 0, &preset); if (ret) @@ -1069,14 +1172,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter, if (preset > 0xFFFFFF) return -EINVAL; - priv->preset[count->id] = preset; + mutex_lock(&priv->lock); - /* Reset Byte Pointer */ - outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + quad8_preset_register_set(priv, count->id, preset); - /* Set Preset Register */ - for (i = 0; i < 3; i++) - outb(preset >> (8 * i), base_offset); + mutex_unlock(&priv->lock); return len; } @@ -1084,15 +1184,20 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter, static ssize_t quad8_count_ceiling_read(struct 
counter_device *counter, struct counter_count *count, void *private, char *buf) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; + + mutex_lock(&priv->lock); /* Range Limit and Modulo-N count modes use preset value as ceiling */ switch (priv->count_mode[count->id]) { case 1: case 3: - return quad8_count_preset_read(counter, count, private, buf); + mutex_unlock(&priv->lock); + return sprintf(buf, "%u\n", priv->preset[count->id]); } + mutex_unlock(&priv->lock); + /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */ return sprintf(buf, "33554431\n"); } @@ -1101,15 +1206,29 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter, struct counter_count *count, void *private, const char *buf, size_t len) { struct quad8_iio *const priv = counter->priv; + unsigned int ceiling; + int ret; + + ret = kstrtouint(buf, 0, &ceiling); + if (ret) + return ret; + + /* Only 24-bit values are supported */ + if (ceiling > 0xFFFFFF) + return -EINVAL; + + mutex_lock(&priv->lock); /* Range Limit and Modulo-N count modes use preset value as ceiling */ switch (priv->count_mode[count->id]) { case 1: case 3: - return quad8_count_preset_write(counter, count, private, buf, - len); + quad8_preset_register_set(priv, count->id, ceiling); + break; } + mutex_unlock(&priv->lock); + return len; } @@ -1137,6 +1256,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, /* Preset enable is active low in Input/Output Control register */ preset_enable = !preset_enable; + mutex_lock(&priv->lock); + priv->preset_enable[count->id] = preset_enable; ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1; @@ -1144,6 +1265,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, /* Load I/O control configuration to Input / Output Control Register */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -1429,6 +1552,9 @@ static int quad8_probe(struct device *dev, unsigned int id) quad8iio->counter.priv = quad8iio; quad8iio->base = base[id]; + /* Initialize mutex */ + mutex_init(&quad8iio->lock); + /* Reset all counters and disable interrupt function */ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP); /* Set initial configuration for all counters */ diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 4d1e25d1ced1..4d3429b2058f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1059,7 +1059,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, update_turbo_state(); if (global.turbo_disabled) { - pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_driver_lock); return -EPERM; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b7bb7c30adeb..b2f9882bc010 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -963,10 +963,12 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err, struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); struct aead_edesc *edesc; int ecode = 0; + bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = rctx->edesc; + has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); @@ -979,7 +981,7 @@ static void aead_crypt_done(struct device *jrdev, u32 
*desc, u32 err, * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ - if (!edesc->bklog) + if (!has_bklog) aead_request_complete(req, ecode); else crypto_finalize_aead_request(jrp->engine, req, ecode); @@ -995,10 +997,12 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err, struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); int ivsize = crypto_skcipher_ivsize(skcipher); int ecode = 0; + bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = rctx->edesc; + has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); @@ -1028,7 +1032,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err, * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ - if (!edesc->bklog) + if (!has_bklog) skcipher_request_complete(req, ecode); else crypto_finalize_skcipher_request(jrp->engine, req, ecode); @@ -1711,7 +1715,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, if (ivsize || mapped_dst_nents > 1) sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + - mapped_dst_nents); + mapped_dst_nents - 1 + !!ivsize); if (sec4_sg_bytes) { edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 943bc0296267..27ff4a3d037e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -583,10 +583,12 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, struct caam_hash_state *state = ahash_request_ctx(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); int ecode = 0; + bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = state->edesc; + has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); @@ -603,7 +605,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ - if (!edesc->bklog) + if (!has_bklog) req->base.complete(&req->base, ecode); else crypto_finalize_hash_request(jrp->engine, req, ecode); @@ -632,10 +634,12 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, struct caam_hash_state *state = ahash_request_ctx(req); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; + bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = state->edesc; + has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); @@ -663,7 +667,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. 
*/ - if (!edesc->bklog) + if (!has_bklog) req->base.complete(&req->base, ecode); else crypto_finalize_hash_request(jrp->engine, req, ecode); diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 4fcae37a2e33..2e44d685618f 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -121,11 +121,13 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct rsa_edesc *edesc; int ecode = 0; + bool has_bklog; if (err) ecode = caam_jr_strstatus(dev, err); edesc = req_ctx->edesc; + has_bklog = edesc->bklog; rsa_pub_unmap(dev, edesc, req); rsa_io_unmap(dev, edesc, req); @@ -135,7 +137,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ - if (!edesc->bklog) + if (!has_bklog) akcipher_request_complete(req, ecode); else crypto_finalize_akcipher_request(jrp->engine, req, ecode); @@ -152,11 +154,13 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc; int ecode = 0; + bool has_bklog; if (err) ecode = caam_jr_strstatus(dev, err); edesc = req_ctx->edesc; + has_bklog = edesc->bklog; switch (key->priv_form) { case FORM1: @@ -176,7 +180,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ - if (!edesc->bklog) + if (!has_bklog) akcipher_request_complete(req, ecode); else crypto_finalize_akcipher_request(jrp->engine, req, ecode); diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c index e92b352fb0ad..43d9e2420110 100644 --- a/drivers/crypto/chelsio/chcr_ktls.c +++ b/drivers/crypto/chelsio/chcr_ktls.c @@ -673,41 +673,14 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) return 0; } -/* - * chcr_write_cpl_set_tcb_ulp: update tcb values. - * TCB is responsible to create tcp headers, so all the related values - * should be correctly updated. - * @tx_info - driver specific tls info. - * @q - tx queue on which packet is going out. - * @tid - TCB identifier. - * @pos - current index where should we start writing. - * @word - TCB word. - * @mask - TCB word related mask. - * @val - TCB word related value. - * @reply - set 1 if looking for TP response. - * return - next position to write. 
- */ -static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, - struct sge_eth_txq *q, u32 tid, - void *pos, u16 word, u64 mask, +static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, + u32 tid, void *pos, u16 word, u64 mask, u64 val, u32 reply) { struct cpl_set_tcb_field_core *cpl; struct ulptx_idata *idata; struct ulp_txpkt *txpkt; - void *save_pos = NULL; - u8 buf[48] = {0}; - int left; - left = (void *)q->q.stat - pos; - if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) { - if (!left) { - pos = q->q.desc; - } else { - save_pos = pos; - pos = buf; - } - } /* ULP_TXPKT */ txpkt = pos; txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0)); @@ -732,18 +705,54 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, idata = (struct ulptx_idata *)(cpl + 1); idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP)); idata->len = htonl(0); + pos = idata + 1; - if (save_pos) { - pos = chcr_copy_to_txd(buf, &q->q, save_pos, - CHCR_SET_TCB_FIELD_LEN); - } else { - /* check again if we are at the end of the queue */ - if (left == CHCR_SET_TCB_FIELD_LEN) + return pos; +} + + +/* + * chcr_write_cpl_set_tcb_ulp: update tcb values. + * TCB is responsible to create tcp headers, so all the related values + * should be correctly updated. + * @tx_info - driver specific tls info. + * @q - tx queue on which packet is going out. + * @tid - TCB identifier. + * @pos - current index where should we start writing. + * @word - TCB word. + * @mask - TCB word related mask. + * @val - TCB word related value. + * @reply - set 1 if looking for TP response. + * return - next position to write. + */ +static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, + struct sge_eth_txq *q, u32 tid, + void *pos, u16 word, u64 mask, + u64 val, u32 reply) +{ + int left = (void *)q->q.stat - pos; + + if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) { + if (!left) { pos = q->q.desc; - else - pos = idata + 1; + } else { + u8 buf[48] = {0}; + + __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, + mask, val, reply); + + return chcr_copy_to_txd(buf, &q->q, pos, + CHCR_SET_TCB_FIELD_LEN); + } } + pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, + mask, val, reply); + + /* check again if we are at the end of the queue */ + if (left == CHCR_SET_TCB_FIELD_LEN) + pos = q->q.desc; + return pos; } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index ccc9eda1bc28..07df88f2e305 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file, return ret; - case DMA_BUF_SET_NAME: + case DMA_BUF_SET_NAME_A: + case DMA_BUF_SET_NAME_B: return dma_buf_set_name(dmabuf, (const char __user *)arg); default: @@ -655,8 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put); * calls attach() of dma_buf_ops to allow device-specific attach functionality * @dmabuf: [in] buffer to attach device to. * @dev: [in] device to be attached. - * @importer_ops [in] importer operations for the attachment - * @importer_priv [in] importer private pointer for the attachment + * @importer_ops: [in] importer operations for the attachment + * @importer_priv: [in] importer private pointer for the attachment * * Returns struct dma_buf_attachment pointer for this attachment. Attachments * must be cleaned up by calling dma_buf_detach(). 
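The caam completion callbacks above (aead_crypt_done(), skcipher_crypt_done(), and the ahash/rsa callbacks) all receive the same fix: edesc->bklog is copied into a local has_bklog before the unmap path that may free edesc runs, so the final "complete directly or via the crypto engine" decision no longer reads freed memory. Below is a minimal sketch of that ordering, using invented edesc/unmap_and_free() stand-ins rather than the driver's real types; the only point it demonstrates is reading the flag before the teardown.

```c
#include <stdbool.h>
#include <stdlib.h>

struct edesc {
	bool bklog;           /* was the request backlogged to the crypto engine? */
	/* ... mapped buffers, hardware descriptor ... */
};

/* teardown releases the descriptor; after this call edesc is gone */
static void unmap_and_free(struct edesc *edesc)
{
	free(edesc);
}

static void request_complete(bool via_engine)
{
	/* completion is routed either directly or through the engine */
	(void)via_engine;
}

static void crypt_done(struct edesc *edesc)
{
	/* Read the flag first: unmap_and_free() may drop the last reference. */
	bool has_bklog = edesc->bklog;

	unmap_and_free(edesc);

	/* Testing edesc->bklog here would be a use-after-free. */
	request_complete(has_bklog);
}

int main(void)
{
	struct edesc *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	e->bklog = true;
	crypt_done(e);
	return 0;
}
```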
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 092483644315..023db6883d05 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -241,7 +241,8 @@ config FSL_RAID config HISI_DMA tristate "HiSilicon DMA Engine support" - depends on ARM64 || (COMPILE_TEST && PCI_MSI) + depends on ARM64 || COMPILE_TEST + depends on PCI_MSI select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 4830ba658ce1..d31076d9ef25 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev) struct dma_chan_dev *chan_dev; chan_dev = container_of(dev, typeof(*chan_dev), device); - if (atomic_dec_and_test(chan_dev->idr_ref)) { - ida_free(&dma_ida, chan_dev->dev_id); - kfree(chan_dev->idr_ref); - } kfree(chan_dev); } @@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device) } static int __dma_async_device_channel_register(struct dma_device *device, - struct dma_chan *chan, - int chan_id) + struct dma_chan *chan) { int rc = 0; - int chancnt = device->chancnt; - atomic_t *idr_ref; - struct dma_chan *tchan; - - tchan = list_first_entry_or_null(&device->channels, - struct dma_chan, device_node); - if (!tchan) - return -ENODEV; - - if (tchan->dev) { - idr_ref = tchan->dev->idr_ref; - } else { - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); - if (!idr_ref) - return -ENOMEM; - atomic_set(idr_ref, 0); - } chan->local = alloc_percpu(typeof(*chan->local)); if (!chan->local) @@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device, * When the chan_id is a negative value, we are dynamically adding * the channel. Otherwise we are static enumerating. */ - chan->chan_id = chan_id < 0 ? 
chancnt : chan_id; + mutex_lock(&device->chan_mutex); + chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); + mutex_unlock(&device->chan_mutex); + if (chan->chan_id < 0) { + pr_err("%s: unable to alloc ida for chan: %d\n", + __func__, chan->chan_id); + goto err_out; + } + chan->dev->device.class = &dma_devclass; chan->dev->device.parent = device->dev; chan->dev->chan = chan; - chan->dev->idr_ref = idr_ref; chan->dev->dev_id = device->dev_id; - atomic_inc(idr_ref); dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id); - rc = device_register(&chan->dev->device); if (rc) - goto err_out; + goto err_out_ida; chan->client_count = 0; - device->chancnt = chan->chan_id + 1; + device->chancnt++; return 0; + err_out_ida: + mutex_lock(&device->chan_mutex); + ida_free(&device->chan_ida, chan->chan_id); + mutex_unlock(&device->chan_mutex); err_out: free_percpu(chan->local); kfree(chan->dev); - if (atomic_dec_return(idr_ref) == 0) - kfree(idr_ref); return rc; } @@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device, { int rc; - rc = __dma_async_device_channel_register(device, chan, -1); + rc = __dma_async_device_channel_register(device, chan); if (rc < 0) return rc; @@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device, device->chancnt--; chan->dev->chan = NULL; mutex_unlock(&dma_list_mutex); + mutex_lock(&device->chan_mutex); + ida_free(&device->chan_ida, chan->chan_id); + mutex_unlock(&device->chan_mutex); device_unregister(&chan->dev->device); free_percpu(chan->local); } @@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); */ int dma_async_device_register(struct dma_device *device) { - int rc, i = 0; + int rc; struct dma_chan* chan; if (!device) @@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device) if (rc != 0) return rc; + mutex_init(&device->chan_mutex); + ida_init(&device->chan_ida); + /* represent channels in sysfs. 
Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { - rc = __dma_async_device_channel_register(device, chan, i++); + rc = __dma_async_device_channel_register(device, chan); if (rc < 0) goto err_out; } @@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device) */ dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_channel_rebalance(); + ida_free(&dma_ida, device->dev_id); dma_device_put(device); mutex_unlock(&dma_list_mutex); } diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a2cadfa2e6d7..364dd34799d4 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info) struct dmatest_thread *thread; list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) + if (!thread->done && !thread->pending) return true; } } @@ -662,8 +662,8 @@ static int dmatest_func(void *data) flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; ktime = ktime_get(); - while (!kthread_should_stop() - && !(params->iterations && total_tests >= params->iterations)) { + while (!(kthread_should_stop() || + (params->iterations && total_tests >= params->iterations))) { struct dma_async_tx_descriptor *tx = NULL; struct dmaengine_unmap_data *um; dma_addr_t *dsts; diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 10117f271b12..d683232d7fea 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, size); tdmac->desc_arr = NULL; + if (tdmac->status == DMA_ERROR) + tdmac->status = DMA_COMPLETE; return; } @@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( if (!desc) goto err_out; - mmp_tdma_config_write(chan, direction, &tdmac->slave_config); + if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config)) + goto err_out; while (buf < buf_len) { desc = &tdmac->desc_arr[i]; diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 581e7a290d98..a3b0b4c56a19 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev, } pci_set_master(pdev); + pd->dma.dev = &pdev->dev; err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); if (err) { @@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev, goto err_free_irq; } - pd->dma.dev = &pdev->dev; INIT_LIST_HEAD(&pd->dma.channels); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index f6a2f42ffc51..b9f0d9636620 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc) static void tegra_dma_synchronize(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + int err; + + err = pm_runtime_get_sync(tdc->tdma->dev); + if (err < 0) { + dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err); + return; + } /* * CPU, which handles interrupt, could be busy in @@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc) wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc)); tasklet_kill(&tdc->tasklet); + + pm_runtime_put(tdc->tdma->dev); } static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc, diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c index d7b965049ccb..fb7c8150b0d1 100644 --- a/drivers/dma/ti/k3-psil.c +++ 
b/drivers/dma/ti/k3-psil.c @@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id) soc_ep_map = &j721e_ep_map; } else { pr_err("PSIL: No compatible machine found for map\n"); + mutex_unlock(&ep_map_mutex); return ERR_PTR(-ENOTSUPP); } pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index aecd5a35a296..5429497d3560 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1230,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, return ret; spin_lock_irqsave(&chan->lock, flags); - - desc = list_last_entry(&chan->active_list, - struct xilinx_dma_tx_descriptor, node); - /* - * VDMA and simple mode do not support residue reporting, so the - * residue field will always be 0. - */ - if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) - residue = xilinx_dma_get_residue(chan, desc); - + if (!list_empty(&chan->active_list)) { + desc = list_last_entry(&chan->active_list, + struct xilinx_dma_tx_descriptor, node); + /* + * VDMA and simple mode do not support residue reporting, so the + * residue field will always be 0. + */ + if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) + residue = xilinx_dma_get_residue(chan, desc); + } spin_unlock_irqrestore(&chan->lock, flags); dma_set_residue(txstate, residue); diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 116707a075f3..1d2e5b85d7ca 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -12,7 +12,7 @@ config IMX_DSP config IMX_SCU bool "IMX SCU Protocol driver" - depends on IMX_MBOX || COMPILE_TEST + depends on IMX_MBOX help The System Controller Firmware (SCFW) is a low-level system function which runs on a dedicated Cortex-M core to provide power, clock, and @@ -24,6 +24,6 @@ config IMX_SCU config IMX_SCU_PD bool "IMX SCU Power Domain driver" - depends on IMX_SCU || COMPILE_TEST + depends on IMX_SCU help The System Controller Firmware (SCFW) based power domain driver. 
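The dmaengine.c change above drops the shared idr_ref accounting and instead has each dma_device hand out channel IDs from its own IDA (device->chan_ida) under device->chan_mutex, returning the ID on unregister or when device_register() fails. A rough userspace analogue of that allocate-under-a-lock / free-on-error shape follows; a small bitmap stands in for the kernel's IDA and every name is invented for illustration.

```c
#include <pthread.h>
#include <stdio.h>

#define MAX_CHANS 32

struct dma_device_sim {
	pthread_mutex_t chan_mutex;          /* serializes ID allocation/free */
	unsigned long chan_bitmap;           /* bit n set => channel ID n in use */
};

static int chan_id_alloc(struct dma_device_sim *dev)
{
	int id = -1;

	pthread_mutex_lock(&dev->chan_mutex);
	for (int i = 0; i < MAX_CHANS; i++) {
		if (!(dev->chan_bitmap & (1UL << i))) {
			dev->chan_bitmap |= 1UL << i;
			id = i;                      /* lowest free ID, like ida_alloc() */
			break;
		}
	}
	pthread_mutex_unlock(&dev->chan_mutex);
	return id;
}

static void chan_id_free(struct dma_device_sim *dev, int id)
{
	pthread_mutex_lock(&dev->chan_mutex);
	dev->chan_bitmap &= ~(1UL << id);
	pthread_mutex_unlock(&dev->chan_mutex);
}

static int channel_register(struct dma_device_sim *dev)
{
	int id = chan_id_alloc(dev);

	if (id < 0)
		return -1;
	/* ... device registration would go here; on failure, give the ID back ... */
	printf("registered dma0chan%d\n", id);
	return id;
}

int main(void)
{
	struct dma_device_sim dev = { .chan_bitmap = 0 };

	pthread_mutex_init(&dev.chan_mutex, NULL);
	int a = channel_register(&dev);
	channel_register(&dev);
	chan_id_free(&dev, a);                       /* unregister gives the ID back */
	printf("next ID after freeing %d: %d\n", a, channel_register(&dev));
	pthread_mutex_destroy(&dev.chan_mutex);
	return 0;
}
```

The kernel version also keeps device->chancnt as a plain counter (chancnt++) instead of deriving it from the highest channel ID.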
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 89ca292236ad..538755062ab7 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -248,11 +248,13 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) return ret; ret = pci_enable_sriov(pcidev, num_vfs); - if (ret) + if (ret) { dfl_fpga_cdev_config_ports_pf(cdev); + return ret; + } } - return ret; + return num_vfs; } static void cci_pci_remove(struct pci_dev *pcidev) diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index ee7765049607..07fa8d9ec675 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -583,7 +583,8 @@ static int zynq_fpga_probe(struct platform_device *pdev) priv->clk = devm_clk_get(dev, "ref_clk"); if (IS_ERR(priv->clk)) { - dev_err(dev, "input clock not found\n"); + if (PTR_ERR(priv->clk) != -EPROBE_DEFER) + dev_err(dev, "input clock not found\n"); return PTR_ERR(priv->clk); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8ea86ffdea0d..466bfe541e45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -85,9 +85,10 @@ * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask * - 3.36.0 - Allow reading more status registers on si/cik + * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 36 +#define KMS_DRIVER_MINOR 37 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h index 074a9a09c0a7..a5b60c9a2418 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h @@ -73,6 +73,22 @@ #define SDMA_OP_AQL_COPY 0 #define SDMA_OP_AQL_BARRIER_OR 0 +#define SDMA_GCR_RANGE_IS_PA (1 << 18) +#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16) +#define SDMA_GCR_GL2_WB (1 << 15) +#define SDMA_GCR_GL2_INV (1 << 14) +#define SDMA_GCR_GL2_DISCARD (1 << 13) +#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11) +#define SDMA_GCR_GL2_US (1 << 10) +#define SDMA_GCR_GL1_INV (1 << 9) +#define SDMA_GCR_GLV_INV (1 << 8) +#define SDMA_GCR_GLK_INV (1 << 7) +#define SDMA_GCR_GLK_WB (1 << 6) +#define SDMA_GCR_GLM_INV (1 << 5) +#define SDMA_GCR_GLM_WB (1 << 4) +#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2) +#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0) + /*define for op field*/ #define SDMA_PKT_HEADER_op_offset 0 #define SDMA_PKT_HEADER_op_mask 0x000000FF diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index ebfd2cdf4e65..d2840c2f6286 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); + /* Invalidate L2, because if we don't do it, we might get stale cache + * lines from previous IBs. + */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV | + SDMA_GCR_GL2_WB | + SDMA_GCR_GLM_INV | + SDMA_GCR_GLM_WB) << 16); + amdgpu_ring_write(ring, 0xffffff80); + amdgpu_ring_write(ring, 0xffff); + /* An IB packet must end on a 8 DW boundary--the next dword * must be on a 8-dword boundary. 
Our IB packet below is 6 * dwords long, thus add x number of NOPs, such that, in @@ -1595,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ + .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib = sdma_v5_0_ring_emit_ib, .emit_fence = sdma_v5_0_ring_emit_fence, .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c5ba5d46a148..94c29b74c086 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3340,7 +3340,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, const union dc_tiling_info *tiling_info, const uint64_t info, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -3352,6 +3353,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, memset(&input, 0, sizeof(input)); memset(&output, 0, sizeof(output)); + if (force_disable_dcc) + return 0; + if (!offset) return 0; @@ -3401,7 +3405,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, union dc_tiling_info *tiling_info, struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = &afb->base; int ret; @@ -3507,7 +3512,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, ret = fill_plane_dcc_attributes(adev, afb, format, rotation, plane_size, tiling_info, - tiling_flags, dcc, address); + tiling_flags, dcc, address, + force_disable_dcc); if (ret) return ret; } @@ -3599,7 +3605,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_plane_state *plane_state, const uint64_t tiling_flags, struct dc_plane_info *plane_info, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = @@ -3681,7 +3688,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->rotation, tiling_flags, &plane_info->tiling_info, &plane_info->plane_size, - &plane_info->dcc, address); + &plane_info->dcc, address, + force_disable_dcc); if (ret) return ret; @@ -3704,6 +3712,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct dc_plane_info plane_info; uint64_t tiling_flags; int ret; + bool force_disable_dcc = false; ret = fill_dc_scaling_info(plane_state, &scaling_info); if (ret) @@ -3718,9 +3727,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, if (ret) return ret; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, &plane_info, - &dc_plane_state->address); + &dc_plane_state->address, + force_disable_dcc); if (ret) return ret; @@ -5342,6 +5353,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, uint64_t tiling_flags; uint32_t domain; int r; + bool force_disable_dcc = false; dm_plane_state_old = to_dm_plane_state(plane->state); dm_plane_state_new = to_dm_plane_state(new_state); @@ -5400,11 +5412,13 @@ static int 
dm_plane_helper_prepare_fb(struct drm_plane *plane, dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; fill_plane_buffer_attributes( adev, afb, plane_state->format, plane_state->rotation, tiling_flags, &plane_state->tiling_info, &plane_state->plane_size, &plane_state->dcc, - &plane_state->address); + &plane_state->address, + force_disable_dcc); } return 0; @@ -6676,7 +6690,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, &bundle->plane_infos[planes_count], - &bundle->flip_addrs[planes_count].address); + &bundle->flip_addrs[planes_count].address, + false); + + DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n", + new_plane_state->plane->index, + bundle->plane_infos[planes_count].dcc.enable); bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; @@ -8096,7 +8115,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, ret = fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, plane_info, - &flip_addr->address); + &flip_addr->address, + false); if (ret) goto cleanup; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7cbb1efb4f68..27a7d2a58079 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2911,6 +2911,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + link->dc->hwss.blank_stream(pipe_ctx); + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) break; } @@ -2927,6 +2933,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_reallocate_mst_payload(link); + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings); + } + status = false; if (out_link_loss) *out_link_loss = true; @@ -4227,6 +4239,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) void dpcd_set_source_specific_data(struct dc_link *link) { const uint32_t post_oui_delay = 30; // 30ms + uint8_t dspc = 0; + enum dc_status ret = DC_ERROR_UNEXPECTED; + + ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc, + sizeof(dspc)); + + if (ret != DC_OK) { + DC_LOG_ERROR("Error in DP aux read transaction," + " not writing source specific data\n"); + return; + } + + /* Return if OUI unsupported */ + if (!(dspc & DP_OUI_SUPPORT)) + return; if (!link->dc->vendor_signature.is_valid) { struct dpcd_amd_signature amd_signature; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 6ddbb00ed37a..4f0e7203dba4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status( return dc_stream_get_status_from_state(dc->current_state, 
stream); } -static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) -{ -#if defined(CONFIG_DRM_AMD_DC_DCN) - unsigned int vupdate_line; - unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos; - struct dc_stream_state *stream = pipe_ctx->stream; - unsigned int us_per_line; - - if (stream->ctx->asic_id.chip_family == FAMILY_RV && - ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { - - vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); - if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) - return; - - if (vpos >= vupdate_line) - return; - - us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz; - lines_to_vupdate = vupdate_line - vpos; - us_to_vupdate = lines_to_vupdate * us_per_line; - - /* 70 us is a conservative estimate of cursor update time*/ - if (us_to_vupdate < 70) - udelay(us_to_vupdate); - } -#endif -} /** * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address @@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - - delay_cursor_until_vupdate(pipe_ctx, dc); - dc->hwss.pipe_control_lock(dc, pipe_to_program, true); + dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_attribute(pipe_ctx); @@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes( } if (pipe_to_program) - dc->hwss.pipe_control_lock(dc, pipe_to_program, false); + dc->hwss.cursor_lock(dc, pipe_to_program, false); return true; } @@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - - delay_cursor_until_vupdate(pipe_ctx, dc); - dc->hwss.pipe_control_lock(dc, pipe_to_program, true); + dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_position(pipe_ctx); } if (pipe_to_program) - dc->hwss.pipe_control_lock(dc, pipe_to_program, false); + dc->hwss.cursor_lock(dc, pipe_to_program, false); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index c279982947e1..10527593868c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2757,6 +2757,7 @@ static const struct hw_sequencer_funcs dce110_funcs = { .disable_plane = dce110_power_down_fe, .pipe_control_lock = dce_pipe_control_lock, .interdependent_update_lock = NULL, + .cursor_lock = dce_pipe_control_lock, .prepare_bandwidth = dce110_prepare_bandwidth, .optimize_bandwidth = dce110_optimize_bandwidth, .set_drr = set_drr, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index b0357546471b..085c1a39b313 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1625,6 +1625,16 @@ void dcn10_pipe_control_lock( hws->funcs.verify_allow_pstate_change_high(dc); } +void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) +{ + /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */ + if (!pipe || pipe->top_pipe) + return; + + dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc, + pipe->stream_res.opp->inst, lock); +} + static bool wait_for_reset_trigger_to_occur( struct dc_context *dc_ctx, struct timing_generator *tg) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 16a50e05ffbf..af51424315d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -49,6 +49,7 @@ void dcn10_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, bool lock); +void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock); void dcn10_blank_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index dd02d3983695..700509bdf503 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn10_disable_plane, .pipe_control_lock = dcn10_pipe_control_lock, + .cursor_lock = dcn10_cursor_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .prepare_bandwidth = dcn10_prepare_bandwidth, .optimize_bandwidth = dcn10_optimize_bandwidth, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 04f863499cfb..3fcd408e9103 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane( REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id); + /* Configure VUPDATE lock set for this MPCC to map to the OPP */ + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id); + /* update mpc tree mux setting */ if (tree->opp_list == insert_above_mpcc) { /* insert the toppest mpcc */ @@ -318,6 +321,7 @@ void mpc1_remove_mpcc( REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); /* mark this mpcc as not in use */ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id); @@ -328,6 +332,7 @@ void mpc1_remove_mpcc( REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); } } @@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc) REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); } @@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); @@ -453,6 +460,13 @@ void mpc1_read_mpcc_state( MPCC_BUSY, &s->busy); } +void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 
1 : 0); +} + static const struct mpc_funcs dcn10_mpc_funcs = { .read_mpcc_state = mpc1_read_mpcc_state, .insert_plane = mpc1_insert_plane, @@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = { .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, .update_blending = mpc1_update_blending, + .cursor_lock = mpc1_cursor_lock, .set_denorm = NULL, .set_denorm_clamp = NULL, .set_output_csc = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h index 962a68e322ee..66a4719c22a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h @@ -39,11 +39,12 @@ SRII(MPCC_BG_G_Y, MPCC, inst),\ SRII(MPCC_BG_R_CR, MPCC, inst),\ SRII(MPCC_BG_B_CB, MPCC, inst),\ - SRII(MPCC_BG_B_CB, MPCC, inst),\ - SRII(MPCC_SM_CONTROL, MPCC, inst) + SRII(MPCC_SM_CONTROL, MPCC, inst),\ + SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \ - SRII(MUX, MPC_OUT, inst) + SRII(MUX, MPC_OUT, inst),\ + VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) #define MPC_COMMON_REG_VARIABLE_LIST \ uint32_t MPCC_TOP_SEL[MAX_MPCC]; \ @@ -55,7 +56,9 @@ uint32_t MPCC_BG_R_CR[MAX_MPCC]; \ uint32_t MPCC_BG_B_CB[MAX_MPCC]; \ uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \ - uint32_t MUX[MAX_OPP]; + uint32_t MUX[MAX_OPP]; \ + uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \ + uint32_t CUR[MAX_OPP]; #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\ @@ -78,7 +81,8 @@ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh) + SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\ + SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh) #define MPC_REG_FIELD_LIST(type) \ type MPCC_TOP_SEL;\ @@ -101,7 +105,9 @@ type MPCC_SM_FIELD_ALT;\ type MPCC_SM_FORCE_NEXT_FRAME_POL;\ type MPCC_SM_FORCE_NEXT_TOP_POL;\ - type MPC_OUT_MUX; + type MPC_OUT_MUX;\ + type MPCC_UPDATE_LOCK_SEL;\ + type CUR_VUPDATE_LOCK_SET; struct dcn_mpc_registers { MPC_COMMON_REG_VARIABLE_LIST @@ -192,4 +198,6 @@ void mpc1_read_mpcc_state( int mpcc_inst, struct mpcc_state *s); +void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 07265ca7d28c..ba849aa31e6e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id { .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## 0 ## _ ## block ## id + +/* set field/register/bitfield name */ +#define SFRB(field_name, reg_name, bitfield, post_fix)\ + .field_name = reg_name ## __ ## bitfield ## post_fix + /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg @@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = { }; static const struct dcn_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) + MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\ + SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT) 
}; static const struct dcn_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK), + MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\ + SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK) }; #define tg_regs(id)\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 22f421e82733..a023a4d59f41 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2294,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc) REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); + if (REG(REFCLK_CNTL)) + REG_WRITE(REFCLK_CNTL, 0); // diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 1e73357eda34..6a21228893ee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .disable_plane = dcn20_disable_plane, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index de9c857ab3e9..570dfd9a243f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = { .mpc_init = mpc1_mpc_init, .mpc_init_single_inst = mpc1_mpc_init_single_inst, .update_blending = mpc2_update_blending, + .cursor_lock = mpc1_cursor_lock, .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp, .wait_for_idle = mpc2_assert_idle_mpcc, .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h index c78fd5123497..496658f420db 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h @@ -179,7 +179,8 @@ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh) + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) /* * DCN2 MPC_OCSC debug status register: diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 5cdbba0cd873..c3535bd3965b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -508,6 +508,10 @@ enum dcn20_clk_src_array_id { .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index b9ff9767e08f..707ce0f28fab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .disable_plane = dcn20_disable_plane, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index b25484aa8222..a721bb401ef0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -284,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .dram_channel_width_bytes = 4, .fabric_datapath_to_dcn_data_return_bytes = 32, .dcn_downspread_percent = 0.5, - .downspread_percent = 0.5, + .downspread_percent = 0.38, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, @@ -340,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIF0_BASE__INST0_SEG ## seg @@ -1374,64 +1378,49 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param { struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; - unsigned int i, j, k; - int closest_clk_lvl; + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int i, j, closest_clk_lvl; // Default clock levels are used for diags, which may lead to overclocking. 
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) { + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; dcn2_1_ip.max_num_dpp = pool->base.pipe_count; dcn2_1_soc.num_chans = bw_params->num_channels; - /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */ - dcn2_1_soc.clock_limits[0].state = 0; - dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz; - dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz; - dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz; - dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2; - - /* - * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk - * as indicator - */ - - closest_clk_lvl = -1; - /* index currently being filled */ - k = 1; - for (i = 1; i < clk_table->num_entries; i++) { - /* loop backwards, skip duplicate state*/ - for (j = dcn2_1_soc.num_states - 1; j >= k; j--) { + ASSERT(clk_table->num_entries); + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) { if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { closest_clk_lvl = j; break; } } - /* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/ - if (closest_clk_lvl != -1) { - dcn2_1_soc.clock_limits[k].state = i; - dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz; - dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - - dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - k++; - } + clock_limits[i].state = i; + clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + + clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + for (i = 0; i < clk_table->num_entries; i++) + 
dcn2_1_soc.clock_limits[i] = clock_limits[i]; + if (clk_table->num_entries) { + dcn2_1_soc.num_states = clk_table->num_entries; + /* duplicate last level */ + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; } - dcn2_1_soc.num_states = k; } - /* duplicate last level */ - dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; - dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; - dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 094afc4c8173..50ee8aa7ec3b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -210,6 +210,22 @@ struct mpc_funcs { struct mpcc_blnd_cfg *blnd_cfg, int mpcc_id); + /* + * Lock cursor updates for the specified OPP. + * OPP defines the set of MPCC that are locked together for cursor. + * + * Parameters: + * [in] mpc - MPC context. + * [in] opp_id - The OPP to lock cursor updates on + * [in] lock - lock/unlock the OPP + * + * Return: void + */ + void (*cursor_lock)( + struct mpc *mpc, + int opp_id, + bool lock); + struct mpcc* (*get_mpcc_for_dpp)( struct mpc_tree *tree, int dpp_id); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d4c1fb242c63..e57467d99d66 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -86,6 +86,7 @@ struct hw_sequencer_funcs { struct dc_state *context, bool lock); void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, bool flip_immediate); + void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock); /* Timing Related */ void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 2a12614a12c2..e4e5a53b2b4e 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) if (!hwmgr) return -EINVAL; - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability) + if (!(hwmgr->not_vf && amdgpu_dpm) || + !hwmgr->hwmgr_func->get_asic_baco_capability) return 0; mutex_lock(&hwmgr->smu_lock); @@ -1452,8 +1453,7 @@ static int pp_get_asic_baco_state(void *handle, int *state) if (!hwmgr) return -EINVAL; - if (!(hwmgr->not_vf && amdgpu_dpm) || - !hwmgr->hwmgr_func->get_asic_baco_state) + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state) return 0; mutex_lock(&hwmgr->smu_lock); @@ -1470,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state) if (!hwmgr) return -EINVAL; - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state) + if (!(hwmgr->not_vf && amdgpu_dpm) || + !hwmgr->hwmgr_func->set_asic_baco_state) return 0; mutex_lock(&hwmgr->smu_lock); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 283615e44838..9d89ebf3a749 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3442,8 +3442,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); - if 
(ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) - ret = -EIO; + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + ret = -EIO; + else + ret = size; + } kfree(txmsg); fail_put: diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 116451101426..4ede08a84e37 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5111,7 +5111,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d struct drm_display_mode *mode; unsigned pixel_clock = (timings->pixel_clock[0] | (timings->pixel_clock[1] << 8) | - (timings->pixel_clock[2] << 16)); + (timings->pixel_clock[2] << 16)) + 1; unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 37f77aee1212..0158e49bf9bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode, unsigned int stride) { struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - struct i915_vma *vma; + struct i915_vma *vma, *vn; + LIST_HEAD(unbind); int ret = 0; if (tiling_mode == I915_TILING_NONE) return 0; mutex_lock(&ggtt->vm.mutex); + + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { + GEM_BUG_ON(vma->vm != &ggtt->vm); + if (i915_vma_fence_prepare(vma, tiling_mode, stride)) continue; + list_move(&vma->vm_link, &unbind); + } + spin_unlock(&obj->vma.lock); + + list_for_each_entry_safe(vma, vn, &unbind, vm_link) { ret = __i915_vma_unbind(vma); - if (ret) + if (ret) { + /* Restore the remaining vma on an error */ + list_splice(&unbind, &ggtt->vm.bound_list); break; + } } + mutex_unlock(&ggtt->vm.mutex); return ret; @@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, } mutex_unlock(&obj->mm.lock); + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { vma->fence_size = i915_gem_fence_size(i915, vma->size, tiling, stride); @@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, if (vma->fence) vma->fence->dirty = true; } + spin_unlock(&obj->vma.lock); obj->tiling_and_stride = tiling | stride; i915_gem_object_unlock(obj); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 2d0fd50c5312..d4f94ca9ae0d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg) unsigned int page_size = BIT(first); obj = i915_gem_object_create_internal(dev_priv, page_size); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_vm; + } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { @@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg) } obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_vm; + } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 91debbc97c9a..08b56d7ab4f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c 
@@ -521,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from, rcu_read_lock(); cl = rcu_dereference(from->hwsp_cacheline); + if (i915_request_completed(from)) /* confirm cacheline is valid */ + goto unlock; if (unlikely(!i915_active_acquire_if_busy(&cl->active))) goto unlock; /* seqno wrapped and completed! */ if (unlikely(i915_request_completed(from))) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9f0653cf0510..d91557d842dc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | + GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; @@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_misc_masked |= GEN8_DE_MISC_GSE; if (INTEL_GEN(dev_priv) >= 9) { - de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; if (IS_GEN9_LP(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS; - } else { - de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; } if (INTEL_GEN(dev_priv) >= 11) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 08699fa069aa..82e3bc280622 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj, GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); + spin_lock(&obj->vma.lock); + if (i915_is_ggtt(vm)) { if (unlikely(overflows_type(vma->size, u32))) - goto err_vma; + goto err_unlock; vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, i915_gem_object_get_tiling(obj), i915_gem_object_get_stride(obj)); if (unlikely(vma->fence_size < vma->size || /* overflow */ vma->fence_size > vm->total)) - goto err_vma; + goto err_unlock; GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); @@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj, __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); } - spin_lock(&obj->vma.lock); - rb = NULL; p = &obj->vma.tree.rb_node; while (*p) { @@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj, return vma; +err_unlock: + spin_unlock(&obj->vma.lock); err_vma: i915_vma_free(vma); return ERR_PTR(-E2BIG); diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index d1086b2a6892..05863b253d68 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, return ret; ret = qxl_release_reserve_list(release, true); - if (ret) + if (ret) { + qxl_release_free(qdev, release); return ret; - + } cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_SURFACE_CMD_CREATE; cmd->flags = QXL_SURF_FLAG_KEEP_DATA; @@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, /* no need to add a release to the fence for this surface bo, since it is only released when we ask to destroy the surface and it would never signal otherwise */ - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); surf->hw_surf_alloc = true; spin_lock(&qdev->surf_id_idr_lock); @@ -542,9 
+543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, cmd->surface_id = id; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); - qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 09583a08e141..91f398d51cfa 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane) cmd->u.set.visible = 1; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); return ret; @@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cmd->u.position.y = plane->state->crtc_y + fb->hot_y; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); if (old_cursor_bo != NULL) qxl_bo_unpin(old_cursor_bo); @@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, cmd->type = QXL_CURSOR_HIDE; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); } static void qxl_update_dumb_head(struct qxl_device *qdev, diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 5bebf1ea1c5d..3599db096973 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, goto out_release_backoff; rects = drawable_set_clipping(qdev, num_clips, clips_bo); - if (!rects) + if (!rects) { + ret = -EINVAL; goto out_release_backoff; - + } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->clip.type = SPICE_CLIP_TYPE_RECTS; @@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, } qxl_bo_kunmap(clips_bo); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_release_backoff: if (ret) diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index 43688ecdd8a0..60ab7151b84d 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev, break; default: DRM_ERROR("unsupported image bit depth\n"); - return -EINVAL; /* TODO: cleanup */ + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); + return -EINVAL; } image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; image->u.bitmap.x = width; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 8117a45b3610..72f3f1bbb40c 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, apply_surf_reloc(qdev, &reloc_info[i]); } + qxl_release_fence_buffer_objects(release); ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); - if (ret) - 
qxl_release_backoff_reserve_list(release); - else - qxl_release_fence_buffer_objects(release); out_free_bos: out_free_release: diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index f4ea4cef5e23..0a5c8cf409fb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -53,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work) events_clear, &events_clear); } -static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev, - uint32_t ctx_id) -{ - virtio_gpu_cmd_context_destroy(vgdev, ctx_id); - virtio_gpu_notify(vgdev); - ida_free(&vgdev->ctx_id_ida, ctx_id - 1); -} - static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, void (*work_func)(struct work_struct *work)) { @@ -275,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file) void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_fpriv *vfpriv; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; if (!vgdev->has_virgl_3d) return; - vfpriv = file->driver_priv; + if (vfpriv->context_created) { + virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id); + virtio_gpu_notify(vgdev); + } - virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id); + ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1); mutex_destroy(&vfpriv->context_lock); kfree(vfpriv); file->driver_priv = NULL; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 7c89edbd6c5a..34f07371716d 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -1155,6 +1155,7 @@ config HID_ALPS config HID_MCP2221 tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support" depends on USB_HID && I2C + depends on GPIOLIB ---help--- Provides I2C and SMBUS host adapter functionality over USB-HID through MCP2221 device. 
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index fa704153cb00..b2ad319a74b9 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -802,6 +802,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) break; case HID_DEVICE_ID_ALPS_U1_DUAL: case HID_DEVICE_ID_ALPS_U1: + case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY: data->dev_type = U1; break; default: diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b18b13147a6f..1c71a1aa76b2 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -79,10 +79,10 @@ #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 #define HID_DEVICE_ID_ALPS_U1 0x1215 +#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C #define HID_DEVICE_ID_ALPS_1222 0x1222 - #define USB_VENDOR_ID_AMI 0x046b #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10 @@ -385,6 +385,7 @@ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 #define USB_VENDOR_ID_ELAN 0x04f3 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 @@ -759,6 +760,7 @@ #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 #define USB_DEVICE_ID_LOGITECH_G15_LCD 0xc222 +#define USB_DEVICE_ID_LOGITECH_G11 0xc225 #define USB_DEVICE_ID_LOGITECH_G15_V2_LCD 0xc227 #define USB_DEVICE_ID_LOGITECH_G510 0xc22d #define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO 0xc22e @@ -1097,6 +1099,9 @@ #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 #define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200 +#define I2C_VENDOR_ID_SYNAPTICS 0x06cb +#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393 0x7a13 + #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 @@ -1111,6 +1116,7 @@ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 +#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c index ad4b5412a9f4..ef0cbcd7540d 100644 --- a/drivers/hid/hid-lg-g15.c +++ b/drivers/hid/hid-lg-g15.c @@ -872,6 +872,10 @@ error_hw_stop: } static const struct hid_device_id lg_g15_devices[] = { + /* The G11 is a G15 without the LCD, treat it as a G15 */ + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, + USB_DEVICE_ID_LOGITECH_G11), + .driver_data = LG_G15 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G15_LCD), .driver_data = LG_G15 }, diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 362805ddf377..03c720b47306 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1922,6 +1922,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, + { .driver_data = MT_CLS_EGALAX, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index ebec818344af..e4cb543de0cd 100644 --- a/drivers/hid/hid-quirks.c +++ 
b/drivers/hid/hid-quirks.c @@ -163,6 +163,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET }, diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 009000c5d55c..294c84e136d7 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -177,6 +177,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_BOGUS_IRQ }, { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, I2C_HID_QUIRK_RESET_ON_RESUME }, + { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393, + I2C_HID_QUIRK_RESET_ON_RESUME }, { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, I2C_HID_QUIRK_BAD_INPUT_SIZE }, { 0, 0 } diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index c7bc9db5b192..17a638f15082 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid) struct usbhid_device *usbhid = hid->driver_data; int res; + mutex_lock(&usbhid->mutex); + set_bit(HID_OPENED, &usbhid->iofl); - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return 0; + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { + res = 0; + goto Done; + } res = usb_autopm_get_interface(usbhid->intf); /* the device must be awake to reliably request remote wakeup */ if (res < 0) { clear_bit(HID_OPENED, &usbhid->iofl); - return -EIO; + res = -EIO; + goto Done; } usbhid->intf->needs_remote_wakeup = 1; @@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid) msleep(50); clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); + + Done: + mutex_unlock(&usbhid->mutex); return res; } @@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; + mutex_lock(&usbhid->mutex); + /* * Make sure we don't restart data acquisition due to * a resumption we no longer care about by avoiding racing @@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid) clear_bit(HID_IN_POLLING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return; + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { + hid_cancel_delayed_stuff(usbhid); + usb_kill_urb(usbhid->urbin); + usbhid->intf->needs_remote_wakeup = 0; + } - hid_cancel_delayed_stuff(usbhid); - usb_kill_urb(usbhid->urbin); - usbhid->intf->needs_remote_wakeup = 0; + mutex_unlock(&usbhid->mutex); } /* @@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid) unsigned int n, insize = 0; int ret; + mutex_lock(&usbhid->mutex); + clear_bit(HID_DISCONNECTED, &usbhid->iofl); usbhid->bufsize = HID_MIN_BUFFER_SIZE; @@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid) usbhid_set_leds(hid); device_set_wakeup_enable(&dev->dev, 1); } + + mutex_unlock(&usbhid->mutex); return 0; fail: @@ -1187,6 +1202,7 @@ fail: usbhid->urbout = NULL; usbhid->urbctrl = NULL; 
hid_free_buffers(dev, hid); + mutex_unlock(&usbhid->mutex); return ret; } @@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->intf->needs_remote_wakeup = 0; } + mutex_lock(&usbhid->mutex); + clear_bit(HID_STARTED, &usbhid->iofl); spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); @@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->urbout = NULL; hid_free_buffers(hid_to_usb_dev(hid), hid); + + mutex_unlock(&usbhid->mutex); } static int usbhid_power(struct hid_device *hid, int lvl) @@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * INIT_WORK(&usbhid->reset_work, hid_reset); timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); spin_lock_init(&usbhid->lock); + mutex_init(&usbhid->mutex); ret = hid_add_device(hid); if (ret) { diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index 8620408bd7af..75fe85d3d27a 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -80,6 +80,7 @@ struct usbhid_device { dma_addr_t outbuf_dma; /* Output buffer dma */ unsigned long last_out; /* record of last output for timeouts */ + struct mutex mutex; /* start/stop/open/close */ spinlock_t lock; /* fifo spinlock */ unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ struct timer_list io_retry; /* Retry timer */ diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 5ded94b7bf68..cd71e7133944 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev, data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, data, n, WAC_CMD_RETRIES); - if (ret == n) { + if (ret == n && features->type == HID_GENERIC) { ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, n, 0); + } else if (ret == 2 && features->type != HID_GENERIC) { + features->touch_max = data[1]; } else { features->touch_max = 16; hid_warn(hdev, "wacom_feature_mapping: " diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index d99a9d407671..1c96809b51c9 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) { struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; + int nbuttons = wacom->features.numbered_buttons; - int buttons = data[282] | ((data[281] & 0x40) << 2); + int expresskeys = data[282]; + int center = (data[281] & 0x40) >> 6; int ring = data[285] & 0x7F; bool ringstatus = data[285] & 0x80; - bool prox = buttons || ringstatus; + bool prox = expresskeys || center || ringstatus; /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ ring = 71 - ring; @@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) if (ring > 71) ring -= 72; - wacom_report_numbered_buttons(pad_input, 9, buttons); + wacom_report_numbered_buttons(pad_input, nbuttons, + expresskeys | (center << (nbuttons - 1))); input_report_abs(pad_input, ABS_WHEEL, ringstatus ? 
ring : 0); @@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, case HID_DG_TIPSWITCH: hid_data->last_slot_field = equivalent_usage; break; + case HID_DG_CONTACTCOUNT: + hid_data->cc_report = report->id; + hid_data->cc_index = i; + hid_data->cc_value_index = j; + break; } } } + + if (hid_data->cc_report != 0 && + hid_data->cc_index >= 0) { + struct hid_field *field = report->field[hid_data->cc_index]; + int value = field->value[hid_data->cc_value_index]; + if (value) + hid_data->num_expected = value; + } + else { + hid_data->num_expected = wacom_wac->features.touch_max; + } } static void wacom_wac_finger_report(struct hid_device *hdev, @@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->touch_input; unsigned touch_max = wacom_wac->features.touch_max; - struct hid_data *hid_data = &wacom_wac->hid_data; /* If more packets of data are expected, give us a chance to * process them rather than immediately syncing a partial @@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, input_sync(input); wacom_wac->hid_data.num_received = 0; - hid_data->num_expected = 0; /* keep touch state for pen event */ wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); @@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev, } } -static void wacom_set_num_expected(struct hid_device *hdev, - struct hid_report *report, - int collection_index, - struct hid_field *field, - int field_index) -{ - struct wacom *wacom = hid_get_drvdata(hdev); - struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct hid_data *hid_data = &wacom_wac->hid_data; - unsigned int original_collection_level = - hdev->collection[collection_index].level; - bool end_collection = false; - int i; - - if (hid_data->num_expected) - return; - - // find the contact count value for this segment - for (i = field_index; i < report->maxfield && !end_collection; i++) { - struct hid_field *field = report->field[i]; - unsigned int field_level = - hdev->collection[field->usage[0].collection_index].level; - unsigned int j; - - if (field_level != original_collection_level) - continue; - - for (j = 0; j < field->maxusage; j++) { - struct hid_usage *usage = &field->usage[j]; - - if (usage->collection_index != collection_index) { - end_collection = true; - break; - } - if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) { - hid_data->cc_report = report->id; - hid_data->cc_index = i; - hid_data->cc_value_index = j; - - if (hid_data->cc_report != 0 && - hid_data->cc_index >= 0) { - - struct hid_field *field = - report->field[hid_data->cc_index]; - int value = - field->value[hid_data->cc_value_index]; - - if (value) - hid_data->num_expected = value; - } - } - } - } - - if (hid_data->cc_report == 0 || hid_data->cc_index < 0) - hid_data->num_expected = wacom_wac->features.touch_max; -} - static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report, int collection_index, struct hid_field *field, int field_index) { struct wacom *wacom = hid_get_drvdata(hdev); - if (WACOM_FINGER_FIELD(field)) - wacom_set_num_expected(hdev, report, collection_index, field, - field_index); wacom_report_events(hdev, report, collection_index, field_index); /* diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 6098e0cbdb4b..533c8b82b344 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -184,11 +184,7 @@ void 
hv_synic_enable_regs(unsigned int cpu) shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR; shared_sint.masked = false; - if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED) - shared_sint.auto_eoi = false; - else - shared_sint.auto_eoi = true; - + shared_sint.auto_eoi = hv_recommend_using_aeoi(); hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64); /* Enable the global synic bit */ diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h index e70783e33680..f9d14db980cb 100644 --- a/drivers/hv/hv_trace.h +++ b/drivers/hv/hv_trace.h @@ -286,8 +286,8 @@ TRACE_EVENT(vmbus_send_tl_connect_request, __field(int, ret) ), TP_fast_assign( - memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16); - memcpy(__entry->host_id, &msg->host_service_id.b, 16); + export_guid(__entry->guest_id, &msg->guest_endpoint_id); + export_guid(__entry->host_id, &msg->host_service_id); __entry->ret = ret; ), TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, " diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a68bce4d0ddb..e06c6b9555cf 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -978,6 +978,9 @@ static int vmbus_resume(struct device *child_device) return drv->resume(dev); } +#else +#define vmbus_suspend NULL +#define vmbus_resume NULL #endif /* CONFIG_PM_SLEEP */ /* @@ -997,11 +1000,22 @@ static void vmbus_device_release(struct device *device) } /* - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than - * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm. + * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm. + * + * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we + * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there + * is no way to wake up a Generation-2 VM. + * + * The other 4 ops are for hibernation. */ + static const struct dev_pm_ops vmbus_pm = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume) + .suspend_noirq = NULL, + .resume_noirq = NULL, + .freeze_noirq = vmbus_suspend, + .thaw_noirq = vmbus_resume, + .poweroff_noirq = vmbus_suspend, + .restore_noirq = vmbus_resume, }; /* The one and only one */ @@ -2281,6 +2295,9 @@ static int vmbus_bus_resume(struct device *dev) return 0; } +#else +#define vmbus_bus_suspend NULL +#define vmbus_bus_resume NULL #endif /* CONFIG_PM_SLEEP */ static const struct acpi_device_id vmbus_acpi_device_ids[] = { @@ -2291,16 +2308,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = { MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids); /* - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than - * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the - * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the - * pci "noirq" restore callback runs before "non-noirq" callbacks (see + * Note: we must use the "no_irq" ops, otherwise hibernation can not work with + * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in + * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() -> * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's - * resume callback must also run via the "noirq" callbacks. + * resume callback must also run via the "noirq" ops. + * + * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment + * earlier in this file before vmbus_pm. 
*/ + static const struct dev_pm_ops vmbus_bus_pm = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume) + .suspend_noirq = NULL, + .resume_noirq = NULL, + .freeze_noirq = vmbus_bus_suspend, + .thaw_noirq = vmbus_bus_resume, + .poweroff_noirq = vmbus_bus_suspend, + .restore_noirq = vmbus_bus_resume }; static struct acpi_driver vmbus_acpi_driver = { diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c index 5e4800d72e00..cd3fd5ee5f65 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-pci.c +++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c @@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev, if (!privdata) return -ENOMEM; + privdata->pci_dev = pci_dev; rc = amd_mp2_pci_init(privdata, pci_dev); if (rc) return rc; mutex_init(&privdata->c2p_lock); - privdata->pci_dev = pci_dev; pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000); pm_runtime_use_autosuspend(&pci_dev->dev); diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 07c1993274c5..f51702d86a90 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) /* Ack all interrupts except for Rx done */ writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, bus->base + ASPEED_I2C_INTR_STS_REG); + readl(bus->base + ASPEED_I2C_INTR_STS_REG); irq_remaining = irq_received; #if IS_ENABLED(CONFIG_I2C_SLAVE) @@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) irq_received, irq_handled); /* Ack Rx done */ - if (irq_received & ASPEED_I2CD_INTR_RX_DONE) + if (irq_received & ASPEED_I2CD_INTR_RX_DONE) { writel(ASPEED_I2CD_INTR_RX_DONE, bus->base + ASPEED_I2C_INTR_STS_REG); + readl(bus->base + ASPEED_I2C_INTR_STS_REG); + } spin_unlock(&bus->lock); return irq_remaining ? IRQ_NONE : IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 44be0926b566..d091a12596ad 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -360,6 +360,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK); i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_WRITE_RECEIVED, &value); + if (rx_status == I2C_SLAVE_RX_END) + i2c_slave_event(iproc_i2c->slave, + I2C_SLAVE_STOP, &value); } } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { /* Master read other than start */ diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 8280ac7cc1b7..4c4d17ddc96b 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -996,13 +996,14 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev, do { u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS); - if (status) + if (status) { tegra_i2c_isr(i2c_dev->irq, i2c_dev); - if (completion_done(complete)) { - s64 delta = ktime_ms_delta(ktimeout, ktime); + if (completion_done(complete)) { + s64 delta = ktime_ms_delta(ktimeout, ktime); - return msecs_to_jiffies(delta) ?: 1; + return msecs_to_jiffies(delta) ?: 1; + } } ktime = ktime_get(); @@ -1029,18 +1030,14 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev, disable_irq(i2c_dev->irq); /* - * Under some rare circumstances (like running KASAN + - * NFS root) CPU, which handles interrupt, may stuck in - * uninterruptible state for a significant time. 
In this - * case we will get timeout if I2C transfer is running on - * a sibling CPU, despite of IRQ being raised. - * - * In order to handle this rare condition, the IRQ status - * needs to be checked after timeout. + * There is a chance that completion may happen after IRQ + * synchronization, which is done by disable_irq(). */ - if (ret == 0) - ret = tegra_i2c_poll_completion_timeout(i2c_dev, - complete, 0); + if (ret == 0 && completion_done(complete)) { + dev_warn(i2c_dev->dev, + "completion done after timeout\n"); + ret = 1; + } } return ret; @@ -1219,15 +1216,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, time_left = tegra_i2c_wait_completion_timeout( i2c_dev, &i2c_dev->dma_complete, xfer_time); - /* - * Synchronize DMA first, since dmaengine_terminate_sync() - * performs synchronization after the transfer's termination - * and we want to get a completion if transfer succeeded. - */ - dmaengine_synchronize(i2c_dev->msg_read ? - i2c_dev->rx_dma_chan : - i2c_dev->tx_dma_chan); - dmaengine_terminate_sync(i2c_dev->msg_read ? i2c_dev->rx_dma_chan : i2c_dev->tx_dma_chan); diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 02981f3d1794..08ba1a8f05eb 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -125,10 +125,10 @@ #define AD7193_CH_AINCOM 0x600 /* AINCOM - AINCOM */ /* ID Register Bit Designations (AD7192_REG_ID) */ -#define ID_AD7190 0x4 -#define ID_AD7192 0x0 -#define ID_AD7193 0x2 -#define ID_AD7195 0x6 +#define CHIPID_AD7190 0x4 +#define CHIPID_AD7192 0x0 +#define CHIPID_AD7193 0x2 +#define CHIPID_AD7195 0x6 #define AD7192_ID_MASK 0x0F /* GPOCON Register Bit Designations (AD7192_REG_GPOCON) */ @@ -161,7 +161,20 @@ enum { AD7192_SYSCALIB_FULL_SCALE, }; +enum { + ID_AD7190, + ID_AD7192, + ID_AD7193, + ID_AD7195, +}; + +struct ad7192_chip_info { + unsigned int chip_id; + const char *name; +}; + struct ad7192_state { + const struct ad7192_chip_info *chip_info; struct regulator *avdd; struct regulator *dvdd; struct clk *mclk; @@ -172,7 +185,6 @@ struct ad7192_state { u32 conf; u32 scale_avail[8][2]; u8 gpocon; - u8 devid; u8 clock_sel; struct mutex lock; /* protect sensor state */ u8 syscalib_mode[8]; @@ -348,7 +360,7 @@ static int ad7192_setup(struct ad7192_state *st, struct device_node *np) id &= AD7192_ID_MASK; - if (id != st->devid) + if (id != st->chip_info->chip_id) dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n", id); @@ -363,7 +375,7 @@ static int ad7192_setup(struct ad7192_state *st, struct device_node *np) st->mode |= AD7192_MODE_REJ60; refin2_en = of_property_read_bool(np, "adi,refin2-pins-enable"); - if (refin2_en && st->devid != ID_AD7195) + if (refin2_en && st->chip_info->chip_id != CHIPID_AD7195) st->conf |= AD7192_CONF_REFSEL; st->conf &= ~AD7192_CONF_CHOP; @@ -859,12 +871,31 @@ static const struct iio_chan_spec ad7193_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(14), }; +static const struct ad7192_chip_info ad7192_chip_info_tbl[] = { + [ID_AD7190] = { + .chip_id = CHIPID_AD7190, + .name = "ad7190", + }, + [ID_AD7192] = { + .chip_id = CHIPID_AD7192, + .name = "ad7192", + }, + [ID_AD7193] = { + .chip_id = CHIPID_AD7193, + .name = "ad7193", + }, + [ID_AD7195] = { + .chip_id = CHIPID_AD7195, + .name = "ad7195", + }, +}; + static int ad7192_channels_config(struct iio_dev *indio_dev) { struct ad7192_state *st = iio_priv(indio_dev); - switch (st->devid) { - case ID_AD7193: + switch (st->chip_info->chip_id) { + case CHIPID_AD7193: indio_dev->channels = ad7193_channels; indio_dev->num_channels = 
ARRAY_SIZE(ad7193_channels); break; @@ -878,10 +909,10 @@ static int ad7192_channels_config(struct iio_dev *indio_dev) } static const struct of_device_id ad7192_of_match[] = { - { .compatible = "adi,ad7190", .data = (void *)ID_AD7190 }, - { .compatible = "adi,ad7192", .data = (void *)ID_AD7192 }, - { .compatible = "adi,ad7193", .data = (void *)ID_AD7193 }, - { .compatible = "adi,ad7195", .data = (void *)ID_AD7195 }, + { .compatible = "adi,ad7190", .data = &ad7192_chip_info_tbl[ID_AD7190] }, + { .compatible = "adi,ad7192", .data = &ad7192_chip_info_tbl[ID_AD7192] }, + { .compatible = "adi,ad7193", .data = &ad7192_chip_info_tbl[ID_AD7193] }, + { .compatible = "adi,ad7195", .data = &ad7192_chip_info_tbl[ID_AD7195] }, {} }; MODULE_DEVICE_TABLE(of, ad7192_of_match); @@ -938,16 +969,16 @@ static int ad7192_probe(struct spi_device *spi) } spi_set_drvdata(spi, indio_dev); - st->devid = (unsigned long)of_device_get_match_data(&spi->dev); + st->chip_info = of_device_get_match_data(&spi->dev); indio_dev->dev.parent = &spi->dev; - indio_dev->name = spi_get_device_id(spi)->name; + indio_dev->name = st->chip_info->name; indio_dev->modes = INDIO_DIRECT_MODE; ret = ad7192_channels_config(indio_dev); if (ret < 0) goto error_disable_dvdd; - if (st->devid == ID_AD7195) + if (st->chip_info->chip_id == CHIPID_AD7195) indio_dev->info = &ad7195_info; else indio_dev->info = &ad7192_info; diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index b747db97f78a..e5691e330323 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -542,7 +542,7 @@ static const struct iio_info ad7797_info = { .read_raw = &ad7793_read_raw, .write_raw = &ad7793_write_raw, .write_raw_get_fmt = &ad7793_write_raw_get_fmt, - .attrs = &ad7793_attribute_group, + .attrs = &ad7797_attribute_group, .validate_trigger = ad_sd_validate_trigger, }; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 80c3f963527b..ae622ee6d08c 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1418,8 +1418,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc) static void stm32_adc_dma_buffer_done(void *data) { struct iio_dev *indio_dev = data; + struct stm32_adc *adc = iio_priv(indio_dev); + int residue = stm32_adc_dma_residue(adc); + + /* + * In DMA mode the trigger services of IIO are not used + * (e.g. no call to iio_trigger_poll). + * Calling irq handler associated to the hardware trigger is not + * relevant as the conversions have already been done. Data + * transfers are performed directly in DMA callback instead. + * This implementation avoids to call trigger irq handler that + * may sleep, in an atomic context (DMA irq handler context). 
+ */ + dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi); - iio_trigger_poll_chained(indio_dev->trig); + while (residue >= indio_dev->scan_bytes) { + u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi]; + + iio_push_to_buffers(indio_dev, buffer); + + residue -= indio_dev->scan_bytes; + adc->bufi += indio_dev->scan_bytes; + if (adc->bufi >= adc->rx_buf_sz) + adc->bufi = 0; + } } static int stm32_adc_dma_start(struct iio_dev *indio_dev) @@ -1845,6 +1867,7 @@ static int stm32_adc_probe(struct platform_device *pdev) { struct iio_dev *indio_dev; struct device *dev = &pdev->dev; + irqreturn_t (*handler)(int irq, void *p) = NULL; struct stm32_adc *adc; int ret; @@ -1911,9 +1934,11 @@ static int stm32_adc_probe(struct platform_device *pdev) if (ret < 0) return ret; + if (!adc->dma_chan) + handler = &stm32_adc_trigger_handler; + ret = iio_triggered_buffer_setup(indio_dev, - &iio_pollfunc_store_time, - &stm32_adc_trigger_handler, + &iio_pollfunc_store_time, handler, &stm32_adc_buffer_setup_ops); if (ret) { dev_err(&pdev->dev, "buffer setup failed\n"); diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c index 9a460807d46d..abe4b56c847c 100644 --- a/drivers/iio/adc/ti-ads8344.c +++ b/drivers/iio/adc/ti-ads8344.c @@ -29,7 +29,7 @@ struct ads8344 { struct mutex lock; u8 tx_buf ____cacheline_aligned; - u16 rx_buf; + u8 rx_buf[3]; }; #define ADS8344_VOLTAGE_CHANNEL(chan, si) \ @@ -89,11 +89,11 @@ static int ads8344_adc_conversion(struct ads8344 *adc, int channel, udelay(9); - ret = spi_read(spi, &adc->rx_buf, 2); + ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf)); if (ret) return ret; - return adc->rx_buf; + return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7; } static int ads8344_read_raw(struct iio_dev *iio, diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index ec227b358cd6..6fd06e4eff73 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -102,6 +102,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500; #define XADC_FLAGS_BUFFERED BIT(0) +/* + * The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does + * not have a hardware FIFO. Which means an interrupt is generated for each + * conversion sequence. At 1MSPS sample rate the CPU in ZYNQ7000 is completely + * overloaded by the interrupts that it soft-lockups. For this reason the driver + * limits the maximum samplerate 150kSPS. At this rate the CPU is fairly busy, + * but still responsive. + */ +#define XADC_MAX_SAMPLERATE 150000 + static void xadc_write_reg(struct xadc *xadc, unsigned int reg, uint32_t val) { @@ -674,7 +684,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state) spin_lock_irqsave(&xadc->lock, flags); xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val); - xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS); + xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS); if (state) val |= XADC_AXI_INT_EOS; else @@ -722,13 +732,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode) { uint16_t val; + /* Powerdown the ADC-B when it is not needed. */ switch (seq_mode) { case XADC_CONF1_SEQ_SIMULTANEOUS: case XADC_CONF1_SEQ_INDEPENDENT: - val = XADC_CONF2_PD_ADC_B; + val = 0; break; default: - val = 0; + val = XADC_CONF2_PD_ADC_B; break; } @@ -797,6 +808,16 @@ static int xadc_preenable(struct iio_dev *indio_dev) if (ret) goto err; + /* + * In simultaneous mode the upper and lower aux channels are samples at + * the same time. 
In this mode the upper 8 bits in the sequencer + * register are don't care and the lower 8 bits control two channels + * each. As such we must set the bit if either the channel in the lower + * group or the upper group is enabled. + */ + if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS) + scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000; + ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16); if (ret) goto err; @@ -823,11 +844,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = { .postdisable = &xadc_postdisable, }; +static int xadc_read_samplerate(struct xadc *xadc) +{ + unsigned int div; + uint16_t val16; + int ret; + + ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); + if (ret) + return ret; + + div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; + if (div < 2) + div = 2; + + return xadc_get_dclk_rate(xadc) / div / 26; +} + static int xadc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct xadc *xadc = iio_priv(indio_dev); - unsigned int div; uint16_t val16; int ret; @@ -880,41 +917,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev, *val = -((273150 << 12) / 503975); return IIO_VAL_INT; case IIO_CHAN_INFO_SAMP_FREQ: - ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); - if (ret) + ret = xadc_read_samplerate(xadc); + if (ret < 0) return ret; - div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; - if (div < 2) - div = 2; - - *val = xadc_get_dclk_rate(xadc) / div / 26; - + *val = ret; return IIO_VAL_INT; default: return -EINVAL; } } -static int xadc_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, int val, int val2, long info) +static int xadc_write_samplerate(struct xadc *xadc, int val) { - struct xadc *xadc = iio_priv(indio_dev); unsigned long clk_rate = xadc_get_dclk_rate(xadc); unsigned int div; if (!clk_rate) return -EINVAL; - if (info != IIO_CHAN_INFO_SAMP_FREQ) - return -EINVAL; - if (val <= 0) return -EINVAL; /* Max. 150 kSPS */ - if (val > 150000) - val = 150000; + if (val > XADC_MAX_SAMPLERATE) + val = XADC_MAX_SAMPLERATE; val *= 26; @@ -927,7 +954,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev, * limit. */ div = clk_rate / val; - if (clk_rate / div / 26 > 150000) + if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE) div++; if (div < 2) div = 2; @@ -938,6 +965,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev, div << XADC_CONF2_DIV_OFFSET); } +static int xadc_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, int val2, long info) +{ + struct xadc *xadc = iio_priv(indio_dev); + + if (info != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + + return xadc_write_samplerate(xadc, val); +} + static const struct iio_event_spec xadc_temp_events[] = { { .type = IIO_EV_TYPE_THRESH, @@ -1223,6 +1261,21 @@ static int xadc_probe(struct platform_device *pdev) if (ret) goto err_free_samplerate_trigger; + /* + * Make sure not to exceed the maximum samplerate since otherwise the + * resulting interrupt storm will soft-lock the system. 
+ */ + if (xadc->ops->flags & XADC_FLAGS_BUFFERED) { + ret = xadc_read_samplerate(xadc); + if (ret < 0) + goto err_free_samplerate_trigger; + if (ret > XADC_MAX_SAMPLERATE) { + ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE); + if (ret < 0) + goto err_free_samplerate_trigger; + } + } + ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0, dev_name(&pdev->dev), indio_dev); if (ret) diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 0e35ff06f9af..13bdfbbf5f71 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -79,7 +79,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); - if (!sdata->sensor_settings->odr.addr) + if (!sdata->sensor_settings->odr.mask) return 0; err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out); diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c index a98ea76732e7..2d7623b9b2c0 100644 --- a/drivers/iio/dac/ad5770r.c +++ b/drivers/iio/dac/ad5770r.c @@ -525,7 +525,7 @@ static int ad5770r_channel_config(struct ad5770r_state *st) ret = fwnode_property_read_u32(child, "num", &num); if (ret) return ret; - if (num > AD5770R_MAX_CHANNELS) + if (num >= AD5770R_MAX_CHANNELS) return -EINVAL; ret = fwnode_property_read_u32_array(child, diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 7cb9ff3d3e94..0b8d2f7a0165 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -1617,6 +1617,10 @@ static int __maybe_unused inv_mpu_resume(struct device *dev) if (result) goto out_unlock; + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + result = inv_mpu6050_switch_engine(st, true, st->suspended_sensors); if (result) goto out_unlock; @@ -1638,13 +1642,18 @@ static int __maybe_unused inv_mpu_suspend(struct device *dev) mutex_lock(&st->lock); + st->suspended_sensors = 0; + if (pm_runtime_suspended(dev)) { + result = 0; + goto out_unlock; + } + if (iio_buffer_enabled(indio_dev)) { result = inv_mpu6050_prepare_fifo(st, false); if (result) goto out_unlock; } - st->suspended_sensors = 0; if (st->chip_config.accl_en) st->suspended_sensors |= INV_MPU6050_SENSOR_ACCL; if (st->chip_config.gyro_en) diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h index f2113a63721a..41cb20cb3809 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h @@ -337,6 +337,7 @@ enum st_lsm6dsx_fifo_mode { * @gain: Configured sensor sensitivity. * @odr: Output data rate of the sensor [Hz]. * @watermark: Sensor watermark level. + * @decimator: Sensor decimation factor. * @sip: Number of samples in a given pattern. * @ts_ref: Sensor timestamp reference for hw one. 
* @ext_info: Sensor settings if it is connected to i2c controller @@ -350,11 +351,13 @@ struct st_lsm6dsx_sensor { u32 odr; u16 watermark; + u8 decimator; u8 sip; s64 ts_ref; struct { const struct st_lsm6dsx_ext_dev_settings *settings; + u32 slv_odr; u8 addr; } ext_info; }; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index bb899345f2bb..afd00daeefb2 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -93,6 +93,7 @@ st_lsm6dsx_get_decimator_val(struct st_lsm6dsx_sensor *sensor, u32 max_odr) break; } + sensor->decimator = decimator; return i == max_size ? 0 : st_lsm6dsx_decimator_table[i].val; } @@ -337,7 +338,7 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 addr, int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) { struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor, *ext_sensor = NULL; - int err, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset; + int err, sip, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset; u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE; u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask; u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE]; @@ -399,19 +400,20 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) acc_sip = acc_sensor->sip; ts_sip = hw->ts_sip; offset = 0; + sip = 0; while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) { - if (gyro_sip > 0) { + if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) { memcpy(gyro_buff, &hw->buff[offset], ST_LSM6DSX_SAMPLE_SIZE); offset += ST_LSM6DSX_SAMPLE_SIZE; } - if (acc_sip > 0) { + if (acc_sip > 0 && !(sip % acc_sensor->decimator)) { memcpy(acc_buff, &hw->buff[offset], ST_LSM6DSX_SAMPLE_SIZE); offset += ST_LSM6DSX_SAMPLE_SIZE; } - if (ext_sip > 0) { + if (ext_sip > 0 && !(sip % ext_sensor->decimator)) { memcpy(ext_buff, &hw->buff[offset], ST_LSM6DSX_SAMPLE_SIZE); offset += ST_LSM6DSX_SAMPLE_SIZE; @@ -441,18 +443,25 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) offset += ST_LSM6DSX_SAMPLE_SIZE; } - if (gyro_sip-- > 0) + if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) { iio_push_to_buffers_with_timestamp( hw->iio_devs[ST_LSM6DSX_ID_GYRO], gyro_buff, gyro_sensor->ts_ref + ts); - if (acc_sip-- > 0) + gyro_sip--; + } + if (acc_sip > 0 && !(sip % acc_sensor->decimator)) { iio_push_to_buffers_with_timestamp( hw->iio_devs[ST_LSM6DSX_ID_ACC], acc_buff, acc_sensor->ts_ref + ts); - if (ext_sip-- > 0) + acc_sip--; + } + if (ext_sip > 0 && !(sip % ext_sensor->decimator)) { iio_push_to_buffers_with_timestamp( hw->iio_devs[ST_LSM6DSX_ID_EXT0], ext_buff, ext_sensor->ts_ref + ts); + ext_sip--; + } + sip++; } } diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 84d219ae6aee..4426524b59f2 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -2036,11 +2036,21 @@ static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw) return 0; } -static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw) +static int st_lsm6dsx_reset_device(struct st_lsm6dsx_hw *hw) { const struct st_lsm6dsx_reg *reg; int err; + /* + * flush hw FIFO before device reset in order to avoid + * possible races on interrupt line 1. 
If the first interrupt + * line is asserted during hw reset the device will work in + * I3C-only mode (if it is supported) + */ + err = st_lsm6dsx_flush_fifo(hw); + if (err < 0 && err != -ENOTSUPP) + return err; + /* device sw reset */ reg = &hw->settings->reset; err = regmap_update_bits(hw->regmap, reg->addr, reg->mask, @@ -2059,6 +2069,18 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw) msleep(50); + return 0; +} + +static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw) +{ + const struct st_lsm6dsx_reg *reg; + int err; + + err = st_lsm6dsx_reset_device(hw); + if (err < 0) + return err; + /* enable Block Data Update */ reg = &hw->settings->bdu; err = regmap_update_bits(hw->regmap, reg->addr, reg->mask, diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c index 95ddd19d1aa7..64ef07a30726 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c @@ -421,7 +421,8 @@ int st_lsm6dsx_shub_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable) settings = sensor->ext_info.settings; if (enable) { - err = st_lsm6dsx_shub_set_odr(sensor, sensor->odr); + err = st_lsm6dsx_shub_set_odr(sensor, + sensor->ext_info.slv_odr); if (err < 0) return err; } else { @@ -459,7 +460,7 @@ st_lsm6dsx_shub_read_oneshot(struct st_lsm6dsx_sensor *sensor, if (err < 0) return err; - delay = 1000000000 / sensor->odr; + delay = 1000000000 / sensor->ext_info.slv_odr; usleep_range(delay, 2 * delay); len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3); @@ -500,8 +501,8 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev, iio_device_release_direct_mode(iio_dev); break; case IIO_CHAN_INFO_SAMP_FREQ: - *val = sensor->odr / 1000; - *val2 = (sensor->odr % 1000) * 1000; + *val = sensor->ext_info.slv_odr / 1000; + *val2 = (sensor->ext_info.slv_odr % 1000) * 1000; ret = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_SCALE: @@ -535,8 +536,20 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev, val = val * 1000 + val2 / 1000; err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data); - if (!err) - sensor->odr = val; + if (!err) { + struct st_lsm6dsx_hw *hw = sensor->hw; + struct st_lsm6dsx_sensor *ref_sensor; + u8 odr_val; + int odr; + + ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]); + odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val); + if (odr < 0) + return odr; + + sensor->ext_info.slv_odr = val; + sensor->odr = odr; + } break; } default: @@ -613,6 +626,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw, const struct st_lsm6dsx_ext_dev_settings *info, u8 i2c_addr, const char *name) { + enum st_lsm6dsx_sensor_id ref_id = ST_LSM6DSX_ID_ACC; struct iio_chan_spec *ext_channels; struct st_lsm6dsx_sensor *sensor; struct iio_dev *iio_dev; @@ -628,7 +642,8 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw, sensor = iio_priv(iio_dev); sensor->id = id; sensor->hw = hw; - sensor->odr = info->odr_table.odr_avl[0].milli_hz; + sensor->odr = hw->settings->odr_table[ref_id].odr_avl[0].milli_hz; + sensor->ext_info.slv_odr = info->odr_table.odr_avl[0].milli_hz; sensor->gain = info->fs_table.fs_avl[0].gain; sensor->ext_info.settings = info; sensor->ext_info.addr = i2c_addr; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 2352c426bfb5..24f7bbff4938 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -915,14 +915,11 @@ static ssize_t iio_write_channel_info(struct device *dev, return -EINVAL; integer = ch; } else { - ret = 
iio_str_to_fixpoint(buf, fract_mult, &integer, &fract); + ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, + scale_db); if (ret) return ret; } - ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, - scale_db); - if (ret) - return ret; ret = indio_dev->info->write_raw(indio_dev, this_attr->c, integer, fract, this_attr->address); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 4794113ecd59..17f14e0eafe4 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -862,7 +862,7 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device, ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b, &cm.local_id_next, GFP_KERNEL); - if (ret) + if (ret < 0) goto error; cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; @@ -1828,11 +1828,9 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg, static void cm_format_rej(struct cm_rej_msg *rej_msg, struct cm_id_private *cm_id_priv, - enum ib_cm_rej_reason reason, - void *ari, - u8 ari_length, - const void *private_data, - u8 private_data_len) + enum ib_cm_rej_reason reason, void *ari, + u8 ari_length, const void *private_data, + u8 private_data_len, enum ib_cm_state state) { lockdep_assert_held(&cm_id_priv->lock); @@ -1840,7 +1838,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg, IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.remote_id)); - switch(cm_id_priv->id.state) { + switch (state) { case IB_CM_REQ_RCVD: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); @@ -1905,8 +1903,9 @@ static void cm_dup_req_handler(struct cm_work *work, cm_id_priv->private_data_len); break; case IB_CM_TIMEWAIT: - cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, - IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0); + cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, + IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0, + IB_CM_TIMEWAIT); break; default: goto unlock; @@ -2904,6 +2903,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, u8 ari_length, const void *private_data, u8 private_data_len) { + enum ib_cm_state state = cm_id_priv->id.state; struct ib_mad_send_buf *msg; int ret; @@ -2913,7 +2913,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) return -EINVAL; - switch (cm_id_priv->id.state) { + switch (state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: @@ -2925,7 +2925,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, if (ret) return ret; cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, - ari, ari_length, private_data, private_data_len); + ari, ari_length, private_data, private_data_len, + state); break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: @@ -2934,7 +2935,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, if (ret) return ret; cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, - ari, ari_length, private_data, private_data_len); + ari, ari_length, private_data, private_data_len, + state); break; default: pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__, diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 5128cb16bb48..177333d8bcda 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -360,7 +360,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj, * uverbs_uobject_fd_release(), and 
the caller is expected to ensure * that release is never done while a call to lookup is possible. */ - if (f->f_op != fd_type->fops) { + if (f->f_op != fd_type->fops || uobject->ufile != ufile) { fput(f); return ERR_PTR(-EBADF); } @@ -474,16 +474,15 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj, filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, fd_type->flags); if (IS_ERR(filp)) { + uverbs_uobject_put(uobj); uobj = ERR_CAST(filp); - goto err_uobj; + goto err_fd; } uobj->object = filp; uobj->id = new_fd; return uobj; -err_uobj: - uverbs_uobject_put(uobj); err_fd: put_unused_fd(new_fd); return uobj; @@ -679,7 +678,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { assert_uverbs_usecnt(uobj, mode); - uobj->uapi_object->type_class->lookup_put(uobj, mode); /* * In order to unlock an object, either decrease its usecnt for * read access or zero it in case of exclusive access. See @@ -696,6 +694,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj, break; } + uobj->uapi_object->type_class->lookup_put(uobj, mode); /* Pairs with the kref obtained by type->lookup_get */ uverbs_uobject_put(uobj); } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 2d4083bf4a04..17fc25db0311 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -820,6 +820,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) ret = mmget_not_zero(mm); if (!ret) { list_del_init(&priv->list); + if (priv->entry) { + rdma_user_mmap_entry_put(priv->entry); + priv->entry = NULL; + } mm = NULL; continue; } diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index e8b4b3743661..688f19667221 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -1046,7 +1046,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp, u64 header; wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); - if (wqe) + if (!wqe) return I40IW_ERR_RING_FULL; set_64bit_val(wqe, 32, feat_mem->pa); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index a66518a5c938..275722cec8c6 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1499,8 +1499,9 @@ static int __mlx4_ib_create_default_rules( int i; for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { + union ib_flow_spec ib_spec = {}; int ret; - union ib_flow_spec ib_spec; + switch (pdefault_rules->rules_create_list[i]) { case 0: /* no rule */ diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index af599c8b88aa..d93eec5d3277 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -5554,7 +5554,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f); rdma_ah_set_static_rate(ah_attr, path->static_rate ? 
path->static_rate - 5 : 0); - if (path->grh_mlid & (1 << 7)) { + + if (path->grh_mlid & (1 << 7) || + ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { u32 tc_fl = be32_to_cpu(path->tclass_flowlabel); rdma_ah_set_grh(ah_attr, NULL, diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 5724cbbe38b1..04d2e72017fe 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, */ if (udata && udata->outlen >= sizeof(__u64)) { cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc); - if (!cq->ip) { - err = -ENOMEM; + if (IS_ERR(cq->ip)) { + err = PTR_ERR(cq->ip); goto bail_wc; } diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c index 652f4a7efc1b..37853aa3bcf7 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.c +++ b/drivers/infiniband/sw/rdmavt/mmap.c @@ -154,7 +154,7 @@ done: * @udata: user data (must be valid!) * @obj: opaque pointer to a cq, wq etc * - * Return: rvt_mmap struct on success + * Return: rvt_mmap struct on success, ERR_PTR on failure */ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, struct ib_udata *udata, void *obj) @@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node); if (!ip) - return ip; + return ERR_PTR(-ENOMEM); size = PAGE_ALIGN(size); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 0e1b291d2cec..500a7ee04c44 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, qp->ip = rvt_create_mmap_info(rdi, s, udata, qp->r_rq.wq); - if (!qp->ip) { - ret = ERR_PTR(-ENOMEM); + if (IS_ERR(qp->ip)) { + ret = ERR_CAST(qp->ip); goto bail_qpn; } diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c index 24fef021d51d..f547c115af03 100644 --- a/drivers/infiniband/sw/rdmavt/srq.c +++ b/drivers/infiniband/sw/rdmavt/srq.c @@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr, u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq); - if (!srq->ip) { - ret = -ENOMEM; + if (IS_ERR(srq->ip)) { + ret = PTR_ERR(srq->ip); goto bail_wq; } diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index ae92c8080967..9f53aa4feb87 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) { struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr; struct siw_device *sdev = to_siw_dev(pd->device); - struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); + struct siw_mem *mem; int rv = 0; siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey); - if (unlikely(!mem || !base_mr)) { + if (unlikely(!base_mr)) { pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); return -EINVAL; } + if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) { pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey); - rv = -EINVAL; - goto out; + return -EINVAL; } + + mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); + if (unlikely(!mem)) { + pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); + return -EINVAL; + } + if (unlikely(mem->pd != pd)) { pr_warn("siw: fastreg: PD mismatch\n"); rv = 
-EINVAL; diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c index 2adfde8cdf19..2a11a63e7217 100644 --- a/drivers/interconnect/qcom/bcm-voter.c +++ b/drivers/interconnect/qcom/bcm-voter.c @@ -96,6 +96,8 @@ static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y, if (!cmd) return; + memset(cmd, 0, sizeof(*cmd)); + if (vote_x == 0 && vote_y == 0) valid = false; @@ -112,8 +114,7 @@ static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y, * Set the wait for completion flag on command that need to be completed * before the next command. */ - if (commit) - cmd->wait = true; + cmd->wait = commit; } static void tcs_list_gen(struct list_head *bcm_list, int bucket, diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 58b4a4dbfc78..2ab07ce17abb 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -362,7 +362,7 @@ config IPMMU_VMSA config SPAPR_TCE_IOMMU bool "sPAPR TCE IOMMU Support" - depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST) + depends on PPC_POWERNV || PPC_PSERIES select IOMMU_API help Enables bits of IOMMU API required by VFIO. The iommu_ops @@ -457,7 +457,7 @@ config S390_AP_IOMMU config MTK_IOMMU bool "MTK IOMMU Support" - depends on ARM || ARM64 || COMPILE_TEST + depends on HAS_DMA depends on ARCH_MEDIATEK || COMPILE_TEST select ARM_DMA_USE_IOMMU select IOMMU_API diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 6be3853a5d97..2b9a67ecc6ac 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2936,7 +2936,7 @@ static int __init parse_amd_iommu_intr(char *str) { for (; *str; ++str) { if (strncmp(str, "legacy", 6) == 0) { - amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; break; } if (strncmp(str, "vapic", 5) == 0) { diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ef0a5246700e..0182cff2c7ac 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -371,11 +371,11 @@ int dmar_disabled = 0; int dmar_disabled = 1; #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */ -#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON +#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON int intel_iommu_sm = 1; #else int intel_iommu_sm; -#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */ +#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */ int intel_iommu_enabled = 0; EXPORT_SYMBOL_GPL(intel_iommu_enabled); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 2b471419e26c..7b375421afba 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -170,6 +170,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev) static void dev_iommu_free(struct device *dev) { + iommu_fwspec_free(dev); kfree(dev->iommu); dev->iommu = NULL; } @@ -1428,7 +1429,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) return group; } -EXPORT_SYMBOL(iommu_group_get_for_dev); +EXPORT_SYMBOL_GPL(iommu_group_get_for_dev); struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) { diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 0e2a96467767..5b3b270972f8 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -824,8 +824,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) qcom_iommu->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) + if (res) { qcom_iommu->local_base = devm_ioremap_resource(dev, res); + if 
(IS_ERR(qcom_iommu->local_base)) + return PTR_ERR(qcom_iommu->local_base); + } qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); if (IS_ERR(qcom_iommu->iface_clk)) { diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 58fd137b6ae1..3e500098132f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -585,10 +585,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) /* Do we need to select a new pgpath? */ pgpath = READ_ONCE(m->current_pgpath); - queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); - if (!pgpath || !queue_io) + if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, bio->bi_iter.bi_size); + /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */ + queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); + if ((pgpath && queue_io) || (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) { /* Queue for the daemon to resubmit */ diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 49147e634046..fb41b4f23c48 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, fio->level++; if (type == DM_VERITY_BLOCK_TYPE_METADATA) - block += v->data_blocks; + block = block - v->hash_start + v->data_blocks; /* * For RS(M, N), the continuous FEC data is divided into blocks of N diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 114927da9cc9..613c171b1b6d 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -931,6 +931,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc) return 0; } +static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) +{ + struct dm_io_region region; + struct dm_io_request req; + + region.bdev = wc->ssd_dev->bdev; + region.sector = wc->start_sector; + region.count = n_sectors; + req.bi_op = REQ_OP_READ; + req.bi_op_flags = REQ_SYNC; + req.mem.type = DM_IO_VMA; + req.mem.ptr.vma = (char *)wc->memory_map; + req.client = wc->dm_io; + req.notify.fn = NULL; + + return dm_io(&req, 1, ®ion, NULL); +} + static void writecache_resume(struct dm_target *ti) { struct dm_writecache *wc = ti->private; @@ -941,8 +959,18 @@ static void writecache_resume(struct dm_target *ti) wc_lock(wc); - if (WC_MODE_PMEM(wc)) + if (WC_MODE_PMEM(wc)) { persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); + } else { + r = writecache_read_metadata(wc, wc->metadata_sectors); + if (r) { + size_t sb_entries_offset; + writecache_error(wc, r, "unable to read metadata: %d", r); + sb_entries_offset = offsetof(struct wc_memory_superblock, entries); + memset((char *)wc->memory_map + sb_entries_offset, -1, + (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset); + } + } wc->tree = RB_ROOT; INIT_LIST_HEAD(&wc->lru); @@ -2102,6 +2130,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "Invalid block size"; goto bad; } + if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || + wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { + r = -EINVAL; + ti->error = "Block size is smaller than device logical block size"; + goto bad; + } wc->block_size_bits = __ffs(wc->block_size); wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; @@ -2200,8 +2234,6 @@ invalid_optional: goto bad; } } else { - struct dm_io_region region; - struct dm_io_request req; size_t n_blocks, n_metadata_blocks; uint64_t n_bitmap_bits; @@ -2258,19 +2290,9 @@ invalid_optional: goto bad; } - 
region.bdev = wc->ssd_dev->bdev; - region.sector = wc->start_sector; - region.count = wc->metadata_sectors; - req.bi_op = REQ_OP_READ; - req.bi_op_flags = REQ_SYNC; - req.mem.type = DM_IO_VMA; - req.mem.ptr.vma = (char *)wc->memory_map; - req.client = wc->dm_io; - req.notify.fn = NULL; - - r = dm_io(&req, 1, ®ion, NULL); + r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); if (r) { - ti->error = "Unable to read metadata"; + ti->error = "Unable to read first block of metadata"; goto bad; } } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 3d21c38e2dbb..0c390fe421ad 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -203,11 +203,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } hw = to_me_hw(dev); hw->mem_addr = pcim_iomap_table(pdev)[0]; - hw->irq = pdev->irq; hw->read_fws = mei_me_read_fws; pci_enable_msi(pdev); + hw->irq = pdev->irq; + /* request and enable interrupt */ irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 5bd0ab8b236a..baa6314f69b4 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -878,7 +878,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card) * Issued High Priority Interrupt, and check for card status * until out-of prg-state. */ -int mmc_interrupt_hpi(struct mmc_card *card) +static int mmc_interrupt_hpi(struct mmc_card *card) { int err; u32 status; diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index c2239ee2c0ef..75934f3c117e 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -5,6 +5,7 @@ #include <linux/delay.h> #include <linux/highmem.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/slab.h> @@ -349,12 +350,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card) /* CQHCI is idle and should halt immediately, so set a small timeout */ #define CQHCI_OFF_TIMEOUT 100 +static u32 cqhci_read_ctl(struct cqhci_host *cq_host) +{ + return cqhci_readl(cq_host, CQHCI_CTL); +} + static void cqhci_off(struct mmc_host *mmc) { struct cqhci_host *cq_host = mmc->cqe_private; - ktime_t timeout; - bool timed_out; u32 reg; + int err; if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) return; @@ -364,15 +369,9 @@ static void cqhci_off(struct mmc_host *mmc) cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); - timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT); - while (1) { - timed_out = ktime_compare(ktime_get(), timeout) > 0; - reg = cqhci_readl(cq_host, CQHCI_CTL); - if ((reg & CQHCI_HALT) || timed_out) - break; - } - - if (timed_out) + err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg, + reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT); + if (err < 0) pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); else pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc)); diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 8b038e7b2cd3..2e58743d83bb 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) meson_mx_mmc_start_cmd(mmc, mrq->cmd); } -static int meson_mx_mmc_card_busy(struct mmc_host *mmc) -{ - struct meson_mx_mmc_host *host = mmc_priv(mmc); - u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC); - - return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK); -} - static 
void meson_mx_mmc_read_response(struct mmc_host *mmc, struct mmc_command *cmd) { @@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t) static struct mmc_host_ops meson_mx_mmc_ops = { .request = meson_mx_mmc_request, .set_ios = meson_mx_mmc_set_ios, - .card_busy = meson_mx_mmc_card_busy, .get_cd = mmc_gpio_get_cd, .get_ro = mmc_gpio_get_ro, }; @@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) mmc->f_max = clk_round_rate(host->cfg_div_clk, clk_get_rate(host->parent_clk)); - mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY; mmc->ops = &meson_mx_mmc_ops; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 09ff7315eb5e..a8bcb3f16aa4 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2087,6 +2087,8 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; + pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 525de2454a4d..2527244c2ae1 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card, struct sdhci_pci_slot *slot = sdhci_priv(host); struct intel_host *intel_host = sdhci_pci_priv(slot); + if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv)) + return 0; + return intel_host->drv_strength; } diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 1dea1ba66f7b..4703cd540c7f 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host) { /* Wait for 5ms after set 1.8V signal enable bit */ usleep_range(5000, 5500); + + /* + * For some reason the controller's Host Control2 register reports + * the bit representing 1.8V signaling as 0 when read after it was + * written as 1. Subsequent read reports 1. + * + * Since this may cause some issues, do an empty read of the Host + * Control2 register here to circumvent this. + */ + sdhci_readw(host, SDHCI_HOST_CONTROL2); } static const struct sdhci_ops sdhci_xenon_ops = { diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 6435020d690d..51185e4d7d15 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -24,8 +24,8 @@ config NET_DSA_MV88E6XXX_PTP bool "PTP support for Marvell 88E6xxx" default n depends on NET_DSA_MV88E6XXX_GLOBAL2 + depends on PTP_1588_CLOCK imply NETWORK_PHY_TIMESTAMPING - imply PTP_1588_CLOCK help Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch chips that support it. 
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index dd8a5666a584..2b4a723c8306 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3962,7 +3962,6 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, - .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390_phylink_validate, }; @@ -4021,7 +4020,6 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, - .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, }; @@ -4079,7 +4077,6 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, - .phylink_validate = mv88e6390_phylink_validate, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, .phylink_validate = mv88e6390_phylink_validate, @@ -4235,7 +4232,6 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, - .phylink_validate = mv88e6390_phylink_validate, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index e5b6748f6654..a2dfd73f8a1a 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -401,6 +401,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) ocelot->stats_layout = felix->info->stats_layout; ocelot->num_stats = felix->info->num_stats; ocelot->shared_queue_sz = felix->info->shared_queue_sz; + ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap_is2_keys = felix->info->vcap_is2_keys; ocelot->vcap_is2_actions= felix->info->vcap_is2_actions; ocelot->vcap = felix->info->vcap; diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 2ad793c0e1df..b94386fa8d63 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -16,6 +16,7 @@ struct felix_info { const u32 *const *map; const struct ocelot_ops *ops; int shared_queue_sz; + int num_mact_rows; const struct ocelot_stat_layout *stats_layout; unsigned int num_stats; int num_ports; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 4fe707ef54b8..1c56568d5aca 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1222,6 +1222,7 @@ struct felix_info felix_info_vsc9959 = { .vcap_is2_actions = vsc9959_vcap_is2_actions, .vcap = vsc9959_vcap_props, .shared_queue_sz = 128 * 1024, + .num_mact_rows = 2048, .num_ports = 6, .switch_pci_bar = 4, .imdio_pci_bar = 0, diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig index 0fe1ae173aa1..68c3086af9af 100644 --- a/drivers/net/dsa/sja1105/Kconfig +++ b/drivers/net/dsa/sja1105/Kconfig @@ -20,6 +20,7 @@ tristate "NXP SJA1105 Ethernet switch family support" config NET_DSA_SJA1105_PTP bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch" depends on 
NET_DSA_SJA1105 + depends on PTP_1588_CLOCK help This enables support for timestamping and PTP clock manipulations in the SJA1105 DSA driver. diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index a22f8e3fc06b..bc0e47c1dbb9 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -16,14 +16,15 @@ /* PTPSYNCTS has no interrupt or update mechanism, because the intended * hardware use case is for the timestamp to be collected synchronously, - * immediately after the CAS_MASTER SJA1105 switch has triggered a CASSYNC - * pulse on the PTP_CLK pin. When used as a generic extts source, it needs - * polling and a comparison with the old value. The polling interval is just - * the Nyquist rate of a canonical PPS input (e.g. from a GPS module). - * Anything of higher frequency than 1 Hz will be lost, since there is no - * timestamp FIFO. + * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC + * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a + * generic extts source, the PTPSYNCTS register needs polling and a comparison + * with the old value. The polling interval is configured as the Nyquist rate + * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that + * this hardware can do (but may be enough for some setups). Anything of higher + * frequency than 1 Hz will be lost, since there is no timestamp FIFO. */ -#define SJA1105_EXTTS_INTERVAL (HZ / 2) +#define SJA1105_EXTTS_INTERVAL (HZ / 4) /* This range is actually +/- SJA1105_MAX_ADJ_PPB * divided by 1000 (ppb -> ppm) and with a 16-bit @@ -754,7 +755,16 @@ static int sja1105_extts_enable(struct sja1105_private *priv, return -EOPNOTSUPP; /* Reject requests with unsupported flags */ - if (extts->flags) + if (extts->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* We can only enable time stamping on both edges, sadly. */ + if ((extts->flags & PTP_STRICT_FLAGS) && + (extts->flags & PTP_ENABLE_FEATURE) && + (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) return -EOPNOTSUPP; rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index bd278c4721c6..7df67bf09b93 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -69,7 +69,7 @@ * 16kB. 
*/ #if PAGE_SIZE > SZ_16K -#define ENA_PAGE_SIZE SZ_16K +#define ENA_PAGE_SIZE (_AC(SZ_16K, UL)) #else #define ENA_PAGE_SIZE PAGE_SIZE #endif diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index ce46cdbc69e6..d10fff8a8c71 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -65,7 +65,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = { { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, }, { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, }, - { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, }, { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, }, { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, }, diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index a5d1a6cb9ce3..6795b6d95f54 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct bgmac *bgmac; + struct resource *regs; const u8 *mac_addr; bgmac = bgmac_alloc(&pdev->dev); @@ -206,16 +207,21 @@ static int bgmac_probe(struct platform_device *pdev) if (IS_ERR(bgmac->plat.base)) return PTR_ERR(bgmac->plat.base); - bgmac->plat.idm_base = - devm_platform_ioremap_resource_byname(pdev, "idm_base"); - if (IS_ERR(bgmac->plat.idm_base)) - return PTR_ERR(bgmac->plat.idm_base); - bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); + if (regs) { + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.idm_base)) + return PTR_ERR(bgmac->plat.idm_base); + bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; + } - bgmac->plat.nicpm_base = - devm_platform_ioremap_resource_byname(pdev, "nicpm_base"); - if (IS_ERR(bgmac->plat.nicpm_base)) - return PTR_ERR(bgmac->plat.nicpm_base); + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); + if (regs) { + bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, + regs); + if (IS_ERR(bgmac->plat.nicpm_base)) + return PTR_ERR(bgmac->plat.nicpm_base); + } bgmac->read = platform_bgmac_read; bgmac->write = platform_bgmac_write; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4bbfea147d98..f86b6217f829 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6661,7 +6661,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, int rc; if (!mem_size) - return 0; + return -EINVAL; ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { @@ -9803,6 +9803,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, netdev_features_t features) { struct bnxt *bp = netdev_priv(dev); + netdev_features_t vlan_features; if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) features &= ~NETIF_F_NTUPLE; @@ -9819,12 +9820,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, /* Both CTAG and STAG VLAN accelaration on the RX side have to be * turned on or 
off together. */ - if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != - (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { + vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX); + if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) { if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) features &= ~(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); - else + else if (vlan_features) features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; } @@ -12248,12 +12251,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) bnxt_ulp_start(bp, err); } - if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) - dev_close(netdev); + if (result != PCI_ERS_RESULT_RECOVERED) { + if (netif_running(netdev)) + dev_close(netdev); + pci_disable_device(pdev); + } rtnl_unlock(); - return PCI_ERS_RESULT_RECOVERED; + return result; } /** diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c15517ff7ff6..c04ac4a36005 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1082,7 +1082,6 @@ struct bnxt_vf_info { #define BNXT_VF_LINK_FORCED 0x4 #define BNXT_VF_LINK_UP 0x8 #define BNXT_VF_TRUST 0x10 - u32 func_flags; /* func cfg flags */ u32 min_tx_rate; u32 max_tx_rate; void *hwrm_cmd_req_addr; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 95f893f2a74d..d5c8bd49383a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) #define BNXT_NVM_CFG_VER_BITS 24 #define BNXT_NVM_CFG_VER_BYTES 4 -#define BNXT_MSIX_VEC_MAX 1280 +#define BNXT_MSIX_VEC_MAX 512 #define BNXT_MSIX_VEC_MIN_MAX 128 enum bnxt_nvm_dir_type { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c883e8884faf..3a9a51f7063a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) if (old_setting == setting) return 0; - func_flags = vf->func_flags; if (setting) - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; else - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ @@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) req.flags = cpu_to_le32(func_flags); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { - vf->func_flags = func_flags; if (setting) vf->flags |= BNXT_VF_SPOOFCHK; else @@ -228,7 +226,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) memcpy(vf->mac_addr, mac, ETH_ALEN); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); memcpy(req.dflt_mac_addr, mac, ETH_ALEN); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -266,7 +263,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, 
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.dflt_vlan = cpu_to_le16(vlan_tag); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -305,7 +301,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); req.max_bw = cpu_to_le32(max_tx_rate); req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); @@ -477,7 +472,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) vf = &bp->pf.vf[vf_id]; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); if (is_valid_ether_addr(vf->mac_addr)) { req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 53b50c24d9c9..2c4c12b03502 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -35,8 +35,8 @@ config MACB config MACB_USE_HWSTAMP bool "Use IEEE 1588 hwstamp" depends on MACB + depends on PTP_1588_CLOCK default y - imply PTP_1588_CLOCK ---help--- Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB. diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index a0e8c5bbabc0..36290a8e2a84 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) int status; status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) + if (status < 0) { + pm_runtime_put_noidle(&bp->pdev->dev); goto mdio_pm_exit; + } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, int status; status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) + if (status < 0) { + pm_runtime_put_noidle(&bp->pdev->dev); goto mdio_pm_exit; + } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -3816,8 +3820,10 @@ static int at91ether_open(struct net_device *dev) int ret; ret = pm_runtime_get_sync(&lp->pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&lp->pdev->dev); return ret; + } /* Clear internal statistics */ ctl = macb_readl(lp, NCR); @@ -4172,15 +4178,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, static int fu540_c000_init(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) - return -ENODEV; - - mgmt->reg = ioremap(res->start, resource_size(res)); - if (!mgmt->reg) - return -ENOMEM; + mgmt->reg = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mgmt->reg)) + return PTR_ERR(mgmt->reg); return macb_init(pdev); } diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 6a700d34019e..4520e7ee00fe 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -54,7 +54,7 @@ config THUNDER_NIC_RGX config CAVIUM_PTP tristate "Cavium PTP coprocessor as PTP clock" depends on 64BIT && PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK ---help--- This driver adds support for the Precision Time 
Protocol Clocks and Timestamping coprocessor (PTP) found on Cavium processors. diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index f5dd34db4b54..6516c45864b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2207,6 +2207,9 @@ static void ethofld_hard_xmit(struct net_device *dev, if (unlikely(skip_eotx_wr)) { start = (u64 *)wr; eosw_txq->state = next_state; + eosw_txq->cred -= wrlen16; + eosw_txq->ncompl++; + eosw_txq->last_compl = 0; goto write_wr_headers; } @@ -2365,6 +2368,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) return cxgb4_eth_xmit(skb, dev); } +static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) +{ + int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; + int pidx = eosw_txq->pidx; + struct sk_buff *skb; + + if (!pktcount) + return; + + if (pktcount < 0) + pktcount += eosw_txq->ndesc; + + while (pktcount--) { + pidx--; + if (pidx < 0) + pidx += eosw_txq->ndesc; + + skb = eosw_txq->desc[pidx].skb; + if (skb) { + dev_consume_skb_any(skb); + eosw_txq->desc[pidx].skb = NULL; + eosw_txq->inuse--; + } + } + + eosw_txq->pidx = eosw_txq->last_pidx + 1; +} + /** * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. * @dev - netdevice @@ -2440,9 +2471,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) FW_FLOWC_MNEM_EOSTATE_CLOSING : FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); - eosw_txq->cred -= len16; - eosw_txq->ncompl++; - eosw_txq->last_compl = 0; + /* Free up any pending skbs to ensure there's room for + * termination FLOWC. + */ + if (tc == FW_SCHED_CLS_NONE) + eosw_txq_flush_pending_skbs(eosw_txq); ret = eosw_txq_enqueue(eosw_txq, skb); if (ret) { @@ -2695,6 +2728,7 @@ static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) * is ever running at a time ... 
*/ static void service_ofldq(struct sge_uld_txq *q) + __must_hold(&q->sendq.lock) { u64 *pos, *before, *end; int credits; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c index ebc635f8a4cc..15f37c5b8dc1 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c @@ -74,8 +74,8 @@ err_pci_mem_reg: pci_disable_device(pdev); err_pci_enable: err_mdiobus_alloc: - iounmap(port_regs); err_hw_alloc: + iounmap(port_regs); err_ioremap: return err; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4bd33245bad6..3de549c6c693 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2189,7 +2189,8 @@ static void __ibmvnic_reset(struct work_struct *work) rc = do_hard_reset(adapter, rwi, reset_state); rtnl_unlock(); } - } else { + } else if (!(rwi->reset_reason == VNIC_RESET_FATAL && + adapter->from_passive_init)) { rc = do_reset(adapter, rwi, reset_state); } kfree(rwi); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 8972cdd559e8..7352244c5e68 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1428,6 +1428,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, struct mvpp2_ethtool_fs *efs; int ret; + if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) + return -EINVAL; + efs = port->rfs_rules[info->fs.location]; if (!efs) return -EINVAL; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 1fa60e985b43..2b5dad2ec650 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4329,6 +4329,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, if (!mvpp22_rss_is_supported()) return -EOPNOTSUPP; + if (rss_context >= MVPP22_N_RSS_TABLES) + return -EINVAL; if (hfunc) *hfunc = ETH_RSS_HASH_CRC32; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5716c3d2bb86..c72c4e1ea383 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) if (!err || err == -ENOSPC) { priv->def_counter[port] = idx; + err = 0; } else if (err == -ENOENT) { err = 0; continue; @@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) *idx = get_param_l(&out_param); - + if (WARN_ON(err == -ENOSPC)) + err = -EINVAL; return err; } return __mlx4_counter_alloc(dev, idx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 34cba97f7bf4..cede5bdfd598 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work) } cmd->ent_arr[ent->idx] = ent; - set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work) if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); /* Skip sending 
command to fw if internal error */ if (pci_channel_offline(dev->pdev) || @@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work) MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + /* no doorbell, no need to keep the entry */ + free_ent(cmd, ent->idx); + if (ent->callback) + free_cmd(ent); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2de54d865dc8..1eac7a53d56f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1773,19 +1773,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) { - int err = mlx5e_init_rep_rx(priv); - - if (err) - return err; - mlx5e_create_q_counters(priv); - return 0; + return mlx5e_init_rep_rx(priv); } static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv) { - mlx5e_destroy_q_counters(priv); mlx5e_cleanup_rep_rx(priv); + mlx5e_destroy_q_counters(priv); } static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 781c1d184b60..57ac2ef52e80 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1550,9 +1550,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) MLX5_FLOW_NAMESPACE_KERNEL, 1, modact); if (IS_ERR(mod_hdr)) { + err = PTR_ERR(mod_hdr); esw_warn(dev, "Failed to create restore mod header, err: %d\n", err); - err = PTR_ERR(mod_hdr); goto err_mod_hdr; } @@ -2219,10 +2219,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev); memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); + mutex_init(&esw->fdb_table.offloads.vports.lock); + hash_init(esw->fdb_table.offloads.vports.table); err = esw_create_uplink_offloads_acl_tables(esw); if (err) - return err; + goto create_acl_err; err = esw_create_offloads_table(esw, total_vports); if (err) @@ -2240,9 +2242,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) if (err) goto create_fg_err; - mutex_init(&esw->fdb_table.offloads.vports.lock); - hash_init(esw->fdb_table.offloads.vports.table); - return 0; create_fg_err: @@ -2253,18 +2252,19 @@ create_restore_err: esw_destroy_offloads_table(esw); create_offloads_err: esw_destroy_uplink_offloads_acl_tables(esw); - +create_acl_err: + mutex_destroy(&esw->fdb_table.offloads.vports.lock); return err; } static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) { - mutex_destroy(&esw->fdb_table.offloads.vports.lock); esw_destroy_vport_rx_group(esw); esw_destroy_offloads_fdb_tables(esw); esw_destroy_restore_table(esw); esw_destroy_offloads_table(esw); esw_destroy_uplink_offloads_acl_tables(esw); + mutex_destroy(&esw->fdb_table.offloads.vports.lock); } static void @@ -2377,9 +2377,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) err_vports: esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); err_uplink: - esw_set_passing_vport_metadata(esw, false); -err_steering_init: esw_offloads_steering_cleanup(esw); +err_steering_init: + esw_set_passing_vport_metadata(esw, false); err_vport_metadata: mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index c4ed25bb9ac8..b8d97d44be7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -693,6 +693,12 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) return 0; } +static void dr_cq_complete(struct mlx5_core_cq *mcq, + struct mlx5_eqe *eqe) +{ + pr_err("CQ completion CQ: #%u\n", mcq->cqn); +} + static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, struct mlx5_uars_page *uar, size_t ncqe) @@ -753,6 +759,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); + cq->mcq.comp = dr_cq_complete; + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); kvfree(in); @@ -763,7 +771,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, cq->mcq.set_ci_db = cq->wq_ctrl.db.db; cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; *cq->mcq.set_ci_db = 0; - *cq->mcq.arm_db = 0; + + /* set no-zero value, in order to avoid the HW to run db-recovery on + * CQ that used in polling mode. + */ + *cq->mcq.arm_db = cpu_to_be32(2 << 28); + cq->mcq.vector = 0; cq->mcq.irqn = irqn; cq->mcq.uar = uar; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 430da69003d8..a6e30e020b5c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, unsigned int priority, struct mlxsw_afk_element_usage *elusage) { + struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2; struct mlxsw_sp_acl_tcam_vregion *vregion; - struct mlxsw_sp_acl_tcam_vchunk *vchunk; + struct list_head *pos; int err; if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) @@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, } mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion); - list_add_tail(&vchunk->list, &vregion->vchunk_list); + + /* Position the vchunk inside the list according to priority */ + list_for_each(pos, &vregion->vchunk_list) { + vchunk2 = list_entry(pos, typeof(*vchunk2), list); + if (vchunk2->priority > priority) + break; + } + list_add_tail(&vchunk->list, pos); mutex_unlock(&vregion->lock); return vchunk; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 89c2e9820e95..0897ca1967ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -36,7 +36,8 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack); if (err) return err; - } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) { + } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED && + act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) { NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 1b25cc42f442..49fd843c4c8a 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -565,7 +565,7 @@ static int moxart_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); unregister_netdev(ndev); - 
free_irq(ndev->irq, ndev); + devm_free_irq(&pdev->dev, ndev->irq, ndev); moxart_mac_free_memory(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a2b9b85612a4..c798431ce2a1 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1030,10 +1030,8 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, { int i, j; - /* Loop through all the mac tables entries. There are 1024 rows of 4 - * entries. - */ - for (i = 0; i < 1024; i++) { + /* Loop through all the mac tables entries. */ + for (i = 0; i < ocelot->num_mact_rows; i++) { for (j = 0; j < 4; j++) { struct ocelot_mact_entry entry; bool is_static; @@ -1458,8 +1456,15 @@ static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port, void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs) { - ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2), - ANA_AUTOAGE); + unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000); + + /* Setting AGE_PERIOD to zero effectively disables automatic aging, + * which is clearly not what our intention is. So avoid that. + */ + if (!age_period) + age_period = 1; + + ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE); } EXPORT_SYMBOL(ocelot_set_ageing_time); diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c index ed4dd01a41ad..81d81ff75646 100644 --- a/drivers/net/ethernet/mscc/ocelot_regs.c +++ b/drivers/net/ethernet/mscc/ocelot_regs.c @@ -433,6 +433,7 @@ int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) ocelot->stats_layout = ocelot_stats_layout; ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); ocelot->shared_queue_sz = 224 * 1024; + ocelot->num_mact_rows = 1024; ocelot->ops = ops; ret = ocelot_regfields_init(ocelot, ocelot_regfields); diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index bfa0c0d39600..8b018ed37b1b 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -208,11 +208,13 @@ static int jazz_sonic_probe(struct platform_device *pdev) err = register_netdev(dev); if (err) - goto out1; + goto undo_probe1; return 0; -out1: +undo_probe1: + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 9183b3e85d21..354efffac0f9 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, if (!nfp_nsp_has_hwinfo_lookup(nsp)) { nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); eth_hw_addr_random(nn->dp.netdev); + nfp_nsp_close(nsp); return; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c index 5f8fc58d42b3..11621ccc1faf 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -170,8 +170,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); debugfs_create_u32("desc_size", 0400, 
cq_dentry, &cq->desc_size); - debugfs_create_u8("done_color", 0400, cq_dentry, - (u8 *)&cq->done_color); + debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color); debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 5acf4f46c268..d5293bfded29 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2101,6 +2101,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) ionic_txrx_free(lif); } ionic_lifs_deinit(ionic); + ionic_reset(ionic); ionic_qcqs_free(lif); dev_info(ionic->dev, "FW Down: LIFs stopped\n"); @@ -2116,6 +2117,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) dev_info(ionic->dev, "FW Up: restarting LIFs\n"); + ionic_init_devinfo(ionic); err = ionic_qcqs_alloc(lif); if (err) goto err_out; @@ -2549,8 +2551,6 @@ int ionic_lifs_register(struct ionic *ionic) dev_err(ionic->dev, "Cannot register net device, aborting\n"); return err; } - - ionic_link_status_check_request(ionic->master_lif); ionic->master_lif->registered = true; return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 494c859b4ade..67ba67ed0cb9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -624,7 +624,7 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, total_offset += offset; } - total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000; + total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL; total_ctr += total_offset; ctr_low = do_div(total_ctr, 1000000000); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 90bddca1ddd8..8f08393f25dd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4051,7 +4051,7 @@ static int stmmac_set_features(struct net_device *netdev, /** * stmmac_interrupt - main ISR * @irq: interrupt number. - * @dev_id: to pass the net device pointer. + * @dev_id: to pass the net device pointer (must be valid). * Description: this is the main driver interrupt service routine. 
* It can call: * o DMA service routine (to manage incoming frame reception and transmission @@ -4075,11 +4075,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if (priv->irq_wake) pm_wakeup_event(priv->device, 0); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 519f5a8c5810..988e907e3322 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -90,9 +90,8 @@ config TI_CPTS config TI_CPTS_MOD tristate depends on TI_CPTS + depends on PTP_1588_CLOCK default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y - select NET_PTP_CLASSIFY - imply PTP_1588_CLOCK default m config TI_K3_AM65_CPSW_NUSS diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index bb391286d89e..f8c589929308 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1887,7 +1887,8 @@ static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common) ret = devm_request_irq(dev, tx_chn->irq, am65_cpsw_nuss_tx_irq, - 0, tx_chn->tx_chn_name, tx_chn); + IRQF_TRIGGER_HIGH, + tx_chn->tx_chn_name, tx_chn); if (ret) { dev_err(dev, "failure requesting tx%u irq %u, %d\n", tx_chn->id, tx_chn->irq, ret); @@ -1912,7 +1913,7 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common) ret = devm_request_irq(dev, common->rx_chns.irq, am65_cpsw_nuss_rx_irq, - 0, dev_name(dev), common); + IRQF_TRIGGER_HIGH, dev_name(dev), common); if (ret) { dev_err(dev, "failure requesting rx irq %u, %d\n", common->rx_chns.irq, ret); diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index b50c3ec3495b..6bcda20ed7e7 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev) linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); } - linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_andnot(phydev->supported, phydev->supported, mask); linkmode_copy(phydev->advertising, phydev->supported); lp->link = 0; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 672cd2caf2fb..21640a035d7d 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1169,11 +1169,11 @@ out_unlock: static struct genl_family gtp_genl_family; static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, - u32 type, struct pdp_ctx *pctx) + int flags, u32 type, struct pdp_ctx *pctx) { void *genlh; - genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, 0, + genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, flags, type); if (genlh == NULL) goto nlmsg_failure; @@ -1227,8 +1227,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) goto err_unlock; } - err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, - info->snd_seq, info->nlhdr->nlmsg_type, pctx); + err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, + 0, info->nlhdr->nlmsg_type, pctx); if (err < 0) goto err_unlock_free; @@ -1271,6 +1271,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, gtp_genl_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + NLM_F_MULTI, cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; 
cb->args[1] = j; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c0b647a4c893..5de57fc3ec60 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -707,7 +707,8 @@ no_memory: goto drop; } -static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb, + struct net_device *ndev) { return netvsc_xmit(skb, ndev, false); } diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 8ccbbb920c11..66570609c845 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1063,6 +1063,7 @@ static void gsi_isr_gp_int1(struct gsi *gsi) complete(&gsi->completion); } + /* Inter-EE interrupt handler */ static void gsi_isr_glob_ee(struct gsi *gsi) { @@ -1515,6 +1516,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, struct completion *completion = &gsi->completion; u32 val; + /* First zero the result code field */ + val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); + val &= ~GENERIC_EE_RESULT_FMASK; + iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); + + /* Now issue the command */ val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK); val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); @@ -1820,9 +1827,9 @@ static int gsi_channel_init_one(struct gsi *gsi, /* Worst case we need an event for every outstanding TRE */ if (data->channel.tre_count > data->channel.event_count) { - dev_warn(gsi->dev, "channel %u limited to %u TREs\n", - data->channel_id, data->channel.tre_count); tre_count = data->channel.event_count; + dev_warn(gsi->dev, "channel %u limited to %u TREs\n", + data->channel_id, tre_count); } else { tre_count = data->channel.tre_count; } diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index 7613b9cc7cf6..acc9e744c67d 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -410,6 +410,8 @@ #define INTER_EE_RESULT_FMASK GENMASK(2, 0) #define GENERIC_EE_RESULT_FMASK GENMASK(7, 5) #define GENERIC_EE_SUCCESS_FVAL 1 +#define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3 +#define GENERIC_EE_INCORRECT_CHANNEL_FVAL 5 #define GENERIC_EE_NO_RESOURCES_FVAL 7 #define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */ #define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 82066a223a67..5fec30e542cb 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1269,6 +1269,67 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) ret, endpoint->channel_id, endpoint->endpoint_id); } +static int ipa_endpoint_stop_rx_dma(struct ipa *ipa) +{ + u16 size = IPA_ENDPOINT_STOP_RX_SIZE; + struct gsi_trans *trans; + dma_addr_t addr; + int ret; + + trans = ipa_cmd_trans_alloc(ipa, 1); + if (!trans) { + dev_err(&ipa->pdev->dev, + "no transaction for RX endpoint STOP workaround\n"); + return -EBUSY; + } + + /* Read into the highest part of the zero memory area */ + addr = ipa->zero_addr + ipa->zero_size - size; + + ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false); + + ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT); + if (ret) + gsi_trans_free(trans); + + return ret; +} + +/** + * ipa_endpoint_stop() - Stops a GSI channel in IPA + * @client: Client whose endpoint should be stopped + * + * This function implements the sequence to stop a GSI channel + * in IPA. This function returns when the channel is is STOP state. 
+ * + * Return value: 0 on success, negative otherwise + */ +int ipa_endpoint_stop(struct ipa_endpoint *endpoint) +{ + u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES; + int ret; + + do { + struct ipa *ipa = endpoint->ipa; + struct gsi *gsi = &ipa->gsi; + + ret = gsi_channel_stop(gsi, endpoint->channel_id); + if (ret != -EAGAIN || endpoint->toward_ipa) + break; + + /* For IPA v3.5.1, send a DMA read task and check again */ + if (ipa->version == IPA_VERSION_3_5_1) { + ret = ipa_endpoint_stop_rx_dma(ipa); + if (ret) + break; + } + + msleep(1); + } while (retries--); + + return retries ? ret : -EIO; +} + static void ipa_endpoint_program(struct ipa_endpoint *endpoint) { if (endpoint->toward_ipa) { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index ea3f25cc79ef..20b53e255f68 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1305,7 +1305,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) struct crypto_aead *tfm; int ret; - tfm = crypto_alloc_aead("gcm(aes)", 0, 0); + /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return tfm; @@ -2640,11 +2641,12 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) if (ret) goto rollback; - rtnl_unlock(); /* Force features update, since they are different for SW MACSec and * HW offloading cases. */ netdev_update_features(dev); + + rtnl_unlock(); return 0; rollback: diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 415c27310982..ecbd5e0d685c 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1120,7 +1120,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) goto out; } dp83640_clock_init(clock, bus); - list_add_tail(&phyter_clocks, &clock->list); + list_add_tail(&clock->list, &phyter_clocks); out: mutex_unlock(&phyter_clocks_lock); diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index fe9aa3ad52a7..1dd19d0cb269 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -137,19 +137,18 @@ static int dp83822_set_wol(struct phy_device *phydev, value &= ~DP83822_WOL_SECURE_ON; } - value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL | - DP83822_WOL_CLR_INDICATION); - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, - value); + /* Clear any pending WoL interrupt */ + phy_read(phydev, MII_DP83822_MISR2); + + value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL | + DP83822_WOL_CLR_INDICATION; + + return phy_write_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_WOL_CFG, value); } else { - value = phy_read_mmd(phydev, DP83822_DEVADDR, - MII_DP83822_WOL_CFG); - value &= ~DP83822_WOL_EN; - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, - value); + return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_WOL_CFG, DP83822_WOL_EN); } - - return 0; } static void dp83822_get_wol(struct phy_device *phydev, @@ -258,12 +257,11 @@ static int dp83822_config_intr(struct phy_device *phydev) static int dp83822_config_init(struct phy_device *phydev) { - int value; - - value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN; + int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | + DP83822_WOL_SECURE_ON; - return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, - value); + return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_WOL_CFG, value); } static int dp83822_phy_reset(struct phy_device *phydev) diff --git a/drivers/net/phy/dp83tc811.c 
b/drivers/net/phy/dp83tc811.c index 06f08832ebcd..d73725312c7c 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -139,16 +139,19 @@ static int dp83811_set_wol(struct phy_device *phydev, value &= ~DP83811_WOL_SECURE_ON; } - value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL | - DP83811_WOL_CLR_INDICATION); - phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, - value); + /* Clear any pending WoL interrupt */ + phy_read(phydev, MII_DP83811_INT_STAT1); + + value |= DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL | + DP83811_WOL_CLR_INDICATION; + + return phy_write_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_WOL_CFG, value); } else { - phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, - DP83811_WOL_EN); + return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_WOL_CFG, DP83811_WOL_EN); } - return 0; } static void dp83811_get_wol(struct phy_device *phydev, @@ -292,8 +295,8 @@ static int dp83811_config_init(struct phy_device *phydev) value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN; - return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, - value); + return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + value); } static int dp83811_phy_reset(struct phy_device *phydev) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 8352c09004c7..d4c2e62b2439 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -66,6 +66,9 @@ enum { MV_PCS_CSSR1_SPD2_2500 = 0x0004, MV_PCS_CSSR1_SPD2_10000 = 0x0000, + /* Temperature read register (88E2110 only) */ + MV_PCS_TEMP = 0x8042, + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control * registers appear to set themselves to the 0x800X when AN is * restarted, but status registers appear readable from either. 
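The marvell10g.c hunk above adds a PCS-MMD temperature register for the 88E2110 (MV_PCS_TEMP = 0x8042), and the hunks that follow make the hwmon read path pick between that register and the 88X3310's vendor-MMD register based on the PHY ID. A minimal stand-alone sketch of that dispatch pattern is given below; the struct, the read_mmd callback, the PHY IDs and the 88X3310 register address are illustrative stand-ins rather than the driver's real definitions, with only 0x8042 taken from the diff.

#include <stdio.h>
#include <stdint.h>

/* Stand-in IDs and addresses; only 0x8042 comes from the hunk above. */
#define DEMO_ID_88X3310  0x002b09a0u
#define DEMO_ID_88E2110  0x002b09b0u
#define DEMO_MMD_PCS     3
#define DEMO_MMD_VEND2   31
#define DEMO_PCS_TEMP    0x8042   /* 88E2110 temperature register */
#define DEMO_V2_TEMP     0xf08c   /* placeholder for the 88X3310 register */

struct demo_phy {
	uint32_t phy_id;
	int (*read_mmd)(struct demo_phy *phy, int mmd, unsigned int reg);
};

/* Pick the MMD/register pair according to the attached PHY family. */
static int demo_read_temp(struct demo_phy *phy)
{
	if (phy->phy_id == DEMO_ID_88X3310)
		return phy->read_mmd(phy, DEMO_MMD_VEND2, DEMO_V2_TEMP);
	/* otherwise treat it as an 88E2110 */
	return phy->read_mmd(phy, DEMO_MMD_PCS, DEMO_PCS_TEMP);
}

static int fake_read_mmd(struct demo_phy *phy, int mmd, unsigned int reg)
{
	(void)phy;
	printf("read MMD %d reg 0x%04x\n", mmd, reg);
	return 25;	/* pretend the sensor reports 25 degrees */
}

int main(void)
{
	struct demo_phy phy = { DEMO_ID_88E2110, fake_read_mmd };

	printf("temp = %d\n", demo_read_temp(&phy));
	return 0;
}

Keying the read on the PHY ID keeps a single hwmon callback, while the 88X3310-only temperature-control setup stays gated behind the same ID check, as a later mv3310_hwmon_config hunk does.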
@@ -77,6 +80,7 @@ enum { MV_V2_PORT_CTRL = 0xf001, MV_V2_PORT_CTRL_SWRST = BIT(15), MV_V2_PORT_CTRL_PWRDOWN = BIT(11), + /* Temperature control/read registers (88X3310 only) */ MV_V2_TEMP_CTRL = 0xf08a, MV_V2_TEMP_CTRL_MASK = 0xc000, MV_V2_TEMP_CTRL_SAMPLE = 0x0000, @@ -104,6 +108,24 @@ static umode_t mv3310_hwmon_is_visible(const void *data, return 0; } +static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev) +{ + return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); +} + +static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev) +{ + return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP); +} + +static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev) +{ + if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310) + return mv3310_hwmon_read_temp_reg(phydev); + else /* MARVELL_PHY_ID_88E2110 */ + return mv2110_hwmon_read_temp_reg(phydev); +} + static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *value) { @@ -116,7 +138,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, } if (type == hwmon_temp && attr == hwmon_temp_input) { - temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); + temp = mv10g_hwmon_read_temp_reg(phydev); if (temp < 0) return temp; @@ -169,6 +191,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable) u16 val; int ret; + if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310) + return 0; + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP, MV_V2_TEMP_UNKNOWN); if (ret < 0) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6c738a271257..4bb8552a00d3 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 5c964fcb994e..71b8e80b58e1 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, if (multicore) { queue->worker = wg_packet_percpu_multicore_worker_alloc( function, queue); - if (!queue->worker) + if (!queue->worker) { + ptr_ring_cleanup(&queue->ring, NULL); return -ENOMEM; + } } else { INIT_WORK(&queue->work, function); } diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index da3b782ab7d3..3bb5b9ae7cd1 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -226,21 +226,20 @@ void wg_packet_handshake_receive_worker(struct work_struct *work) static void keep_key_fresh(struct wg_peer *peer) { struct noise_keypair *keypair; - bool send = false; + bool send; if (peer->sent_lastminute_handshake) return; rcu_read_lock_bh(); keypair = rcu_dereference_bh(peer->keypairs.current_keypair); - if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && - keypair->i_am_the_initiator && - unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, - 
REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) - send = true; + send = keypair && READ_ONCE(keypair->sending.is_valid) && + keypair->i_am_the_initiator && + wg_birthdate_has_expired(keypair->sending.birthdate, + REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); rcu_read_unlock_bh(); - if (send) { + if (unlikely(send)) { peer->sent_lastminute_handshake = true; wg_packet_send_queued_handshake_initiation(peer, false); } @@ -393,13 +392,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, len = ntohs(ip_hdr(skb)->tot_len); if (unlikely(len < sizeof(struct iphdr))) goto dishonest_packet_size; - if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) - IP_ECN_set_ce(ip_hdr(skb)); + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); } else if (skb->protocol == htons(ETH_P_IPV6)) { len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); - if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) - IP6_ECN_set_ce(skb, ipv6_hdr(skb)); + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); } else { goto dishonest_packet_type; } @@ -518,6 +515,8 @@ void wg_packet_decrypt_worker(struct work_struct *work) &PACKET_CB(skb)->keypair->receiving)) ? PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; wg_queue_enqueue_per_peer_napi(skb, state); + if (need_resched()) + cond_resched(); } } diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c index bcd6462e4540..007cd4457c5f 100644 --- a/drivers/net/wireguard/selftest/ratelimiter.c +++ b/drivers/net/wireguard/selftest/ratelimiter.c @@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void) enum { TRIALS_BEFORE_GIVING_UP = 5000 }; bool success = false; int test = 0, trials; - struct sk_buff *skb4, *skb6; + struct sk_buff *skb4, *skb6 = NULL; struct iphdr *hdr4; - struct ipv6hdr *hdr6; + struct ipv6hdr *hdr6 = NULL; if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) return true; diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index 7348c10cbae3..6687db699803 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg, static void keep_key_fresh(struct wg_peer *peer) { struct noise_keypair *keypair; - bool send = false; + bool send; rcu_read_lock_bh(); keypair = rcu_dereference_bh(peer->keypairs.current_keypair); - if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && - (unlikely(atomic64_read(&keypair->sending.counter.counter) > - REKEY_AFTER_MESSAGES) || - (keypair->i_am_the_initiator && - unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, - REKEY_AFTER_TIME))))) - send = true; + send = keypair && READ_ONCE(keypair->sending.is_valid) && + (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || + (keypair->i_am_the_initiator && + wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); rcu_read_unlock_bh(); - if (send) + if (unlikely(send)) wg_packet_send_queued_handshake_initiation(peer, false); } @@ -281,6 +278,8 @@ void wg_packet_tx_worker(struct work_struct *work) wg_noise_keypair_put(keypair, false); wg_peer_put(peer); + if (need_resched()) + cond_resched(); } } @@ -304,7 +303,8 @@ void wg_packet_encrypt_worker(struct work_struct *work) } wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, state); - + if (need_resched()) + cond_resched(); } } diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index b0d6541582d3..f9018027fc13 100644 --- 
a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", wg->dev->name, &endpoint->addr, ret); goto err; - } else if (unlikely(rt->dst.dev == skb->dev)) { - ip_rt_put(rt); - ret = -ELOOP; - net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", - wg->dev->name, &endpoint->addr); - goto err; } if (cache) dst_cache_set_ip4(cache, &rt->dst, fl.saddr); @@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", wg->dev->name, &endpoint->addr, ret); goto err; - } else if (unlikely(dst->dev == skb->dev)) { - dst_release(dst); - ret = -ELOOP; - net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", - wg->dev->name, &endpoint->addr); - goto err; } if (cache) dst_cache_set_ip6(cache, dst, &fl.saddr); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 91c1bd659947..f2adea96b04c 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3642,6 +3642,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) return; out_put_disk: + /* prevent double queue cleanup */ + ns->disk->queue = NULL; put_disk(ns->disk); out_unlink_ns: mutex_lock(&ctrl->subsys->lock); diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig index a208aca4ba7b..c591c958f1eb 100644 --- a/drivers/phy/tegra/Kconfig +++ b/drivers/phy/tegra/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config PHY_TEGRA_XUSB tristate "NVIDIA Tegra XUSB pad controller driver" - depends on ARCH_TEGRA + depends on ARCH_TEGRA && USB_SUPPORT + select USB_COMMON select USB_CONN_GPIO select USB_PHY help diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c index b7f2c00db5e1..9c4af76a9956 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub.c +++ b/drivers/platform/chrome/cros_ec_sensorhub.c @@ -52,28 +52,15 @@ static int cros_ec_sensorhub_register(struct device *dev, int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 }; struct cros_ec_command *msg = sensorhub->msg; struct cros_ec_dev *ec = sensorhub->ec; - int ret, i, sensor_num; + int ret, i; char *name; - sensor_num = cros_ec_get_sensor_count(ec); - if (sensor_num < 0) { - dev_err(dev, - "Unable to retrieve sensor information (err:%d)\n", - sensor_num); - return sensor_num; - } - - sensorhub->sensor_num = sensor_num; - if (sensor_num == 0) { - dev_err(dev, "Zero sensors reported.\n"); - return -EINVAL; - } msg->version = 1; msg->insize = sizeof(struct ec_response_motion_sense); msg->outsize = sizeof(struct ec_params_motion_sense); - for (i = 0; i < sensor_num; i++) { + for (i = 0; i < sensorhub->sensor_num; i++) { sensorhub->params->cmd = MOTIONSENSE_CMD_INFO; sensorhub->params->info.sensor_num = i; @@ -140,8 +127,7 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev) struct cros_ec_dev *ec = dev_get_drvdata(dev->parent); struct cros_ec_sensorhub *data; struct cros_ec_command *msg; - int ret; - int i; + int ret, i, sensor_num; msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) + max((u16)sizeof(struct ec_params_motion_sense), @@ -166,10 +152,52 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev) dev_set_drvdata(dev, data); /* Check whether this EC is a sensor hub. 
*/ - if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) { + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) { + sensor_num = cros_ec_get_sensor_count(ec); + if (sensor_num < 0) { + dev_err(dev, + "Unable to retrieve sensor information (err:%d)\n", + sensor_num); + return sensor_num; + } + if (sensor_num == 0) { + dev_err(dev, "Zero sensors reported.\n"); + return -EINVAL; + } + data->sensor_num = sensor_num; + + /* + * Prepare the ring handler before enumering the + * sensors. + */ + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { + ret = cros_ec_sensorhub_ring_allocate(data); + if (ret) + return ret; + } + + /* Enumerate the sensors.*/ ret = cros_ec_sensorhub_register(dev, data); if (ret) return ret; + + /* + * When the EC does not have a FIFO, the sensors will query + * their data themselves via sysfs or a software trigger. + */ + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { + ret = cros_ec_sensorhub_ring_add(data); + if (ret) + return ret; + /* + * The msg and its data is not under the control of the + * ring handler. + */ + return devm_add_action_or_reset(dev, + cros_ec_sensorhub_ring_remove, + data); + } + } else { /* * If the device has sensors but does not claim to @@ -184,22 +212,6 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev) } } - /* - * If the EC does not have a FIFO, the sensors will query their data - * themselves via sysfs or a software trigger. - */ - if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { - ret = cros_ec_sensorhub_ring_add(data); - if (ret) - return ret; - /* - * The msg and its data is not under the control of the ring - * handler. - */ - return devm_add_action_or_reset(dev, - cros_ec_sensorhub_ring_remove, - data); - } return 0; } diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index c48e5b38a441..24e48d96ed76 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -957,17 +957,15 @@ static int cros_ec_sensorhub_event(struct notifier_block *nb, } /** - * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC - * supports it. + * cros_ec_sensorhub_ring_allocate() - Prepare the FIFO functionality if the EC + * supports it. * * @sensorhub : Sensor Hub object. * * Return: 0 on success. */ -int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub) +int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub) { - struct cros_ec_dev *ec = sensorhub->ec; - int ret; int fifo_info_length = sizeof(struct ec_response_motion_sense_fifo_info) + sizeof(u16) * sensorhub->sensor_num; @@ -978,6 +976,49 @@ int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub) if (!sensorhub->fifo_info) return -ENOMEM; + /* + * Allocate the callback area based on the number of sensors. + * Add one for the sensor ring. 
+ */ + sensorhub->push_data = devm_kcalloc(sensorhub->dev, + sensorhub->sensor_num, + sizeof(*sensorhub->push_data), + GFP_KERNEL); + if (!sensorhub->push_data) + return -ENOMEM; + + sensorhub->tight_timestamps = cros_ec_check_features( + sensorhub->ec, + EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS); + + if (sensorhub->tight_timestamps) { + sensorhub->batch_state = devm_kcalloc(sensorhub->dev, + sensorhub->sensor_num, + sizeof(*sensorhub->batch_state), + GFP_KERNEL); + if (!sensorhub->batch_state) + return -ENOMEM; + } + + return 0; +} + +/** + * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC + * supports it. + * + * @sensorhub : Sensor Hub object. + * + * Return: 0 on success. + */ +int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub) +{ + struct cros_ec_dev *ec = sensorhub->ec; + int ret; + int fifo_info_length = + sizeof(struct ec_response_motion_sense_fifo_info) + + sizeof(u16) * sensorhub->sensor_num; + /* Retrieve FIFO information */ sensorhub->msg->version = 2; sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO; @@ -998,31 +1039,9 @@ int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub) if (!sensorhub->ring) return -ENOMEM; - /* - * Allocate the callback area based on the number of sensors. - */ - sensorhub->push_data = devm_kcalloc( - sensorhub->dev, sensorhub->sensor_num, - sizeof(*sensorhub->push_data), - GFP_KERNEL); - if (!sensorhub->push_data) - return -ENOMEM; - sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = cros_ec_get_time_ns(); - sensorhub->tight_timestamps = cros_ec_check_features( - ec, EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS); - - if (sensorhub->tight_timestamps) { - sensorhub->batch_state = devm_kcalloc(sensorhub->dev, - sensorhub->sensor_num, - sizeof(*sensorhub->batch_state), - GFP_KERNEL); - if (!sensorhub->batch_state) - return -ENOMEM; - } - /* Register the notifier that will act as a top half interrupt. */ sensorhub->notifier.notifier_call = cros_ec_sensorhub_event; ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 6f12747a359a..c4404d9c1de4 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -515,9 +515,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { .detect_quirks = asus_nb_wmi_quirks, }; +static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { + { + /* + * asus-nb-wm adds no functionality. The T100TA has a detachable + * USB kbd, so no hotkeys and it has no WMI rfkill; and loading + * asus-nb-wm causes the camera LED to turn and _stay_ on. 
+ */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), + }, + }, + { + /* The Asus T200TA has the same issue as the T100TA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), + }, + }, + {} /* Terminating entry */ +}; static int __init asus_nb_wmi_init(void) { + if (dmi_check_system(asus_nb_wmi_blacklist)) + return -ENODEV; + return asus_wmi_register_driver(&asus_nb_wmi_driver); } diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c index b96d172eb2c1..12d5ab7e1f5d 100644 --- a/drivers/platform/x86/intel-uncore-frequency.c +++ b/drivers/platform/x86/intel-uncore-frequency.c @@ -53,7 +53,7 @@ static int uncore_max_entries __read_mostly; /* Storage for uncore data for all instances */ static struct uncore_data *uncore_instances; /* Root of the all uncore sysfs kobjs */ -struct kobject *uncore_root_kobj; +static struct kobject *uncore_root_kobj; /* Stores the CPU mask of the target CPUs to use during uncore read/write */ static cpumask_t uncore_cpu_mask; /* CPU online callback register instance */ diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index d2a5d4c36715..7c8bdab078cf 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -255,7 +255,7 @@ static const struct pmc_bit_map *ext_cnp_pfear_map[] = { }; static const struct pmc_bit_map icl_pfear_map[] = { - /* Ice Lake generation onwards only */ + /* Ice Lake and Jasper Lake generation onwards only */ {"RES_65", BIT(0)}, {"RES_66", BIT(1)}, {"RES_67", BIT(2)}, @@ -274,7 +274,7 @@ static const struct pmc_bit_map *ext_icl_pfear_map[] = { }; static const struct pmc_bit_map tgl_pfear_map[] = { - /* Tiger Lake, Elkhart Lake and Jasper Lake generation onwards only */ + /* Tiger Lake and Elkhart Lake generation onwards only */ {"PSF9", BIT(0)}, {"RES_66", BIT(1)}, {"RES_67", BIT(2)}, @@ -692,7 +692,6 @@ static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev, kfree(lpm_regs); } -#if IS_ENABLED(CONFIG_DEBUG_FS) static bool slps0_dbg_latch; static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) @@ -1133,15 +1132,6 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) &pmc_core_substate_l_sts_regs_fops); } } -#else -static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) -{ -} - -static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) -{ -} -#endif /* CONFIG_DEBUG_FS */ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map), @@ -1156,7 +1146,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &tgl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map), {} }; @@ -1260,13 +1250,11 @@ static int pmc_core_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP - static bool warn_on_s0ix_failures; module_param(warn_on_s0ix_failures, bool, 0644); MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures"); -static int pmc_core_suspend(struct device *dev) +static __maybe_unused int pmc_core_suspend(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); @@ -1318,7 +1306,7 @@ 
static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev) return false; } -static int pmc_core_resume(struct device *dev) +static __maybe_unused int pmc_core_resume(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; @@ -1348,8 +1336,6 @@ static int pmc_core_resume(struct device *dev) return 0; } -#endif - static const struct dev_pm_ops pmc_core_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume) }; diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index 0d50b2402abe..5eae55d80226 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -282,9 +282,7 @@ struct pmc_dev { u32 base_addr; void __iomem *regbase; const struct pmc_reg_map *map; -#if IS_ENABLED(CONFIG_DEBUG_FS) struct dentry *dbgfs_dir; -#endif /* CONFIG_DEBUG_FS */ int pmc_xram_read_bit; struct mutex lock; /* generic mutex lock for PMC Core */ diff --git a/drivers/platform/x86/surface3_power.c b/drivers/platform/x86/surface3_power.c index 946ac2dc08ae..cc4f9cba6856 100644 --- a/drivers/platform/x86/surface3_power.c +++ b/drivers/platform/x86/surface3_power.c @@ -522,8 +522,8 @@ static int mshw0011_probe(struct i2c_client *client) strlcpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE); bat0 = i2c_acpi_new_device(dev, 1, &board_info); - if (!bat0) - return -ENOMEM; + if (IS_ERR(bat0)) + return PTR_ERR(bat0); data->bat0 = bat0; i2c_set_clientdata(bat0, data); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 8eaadbaf8ffa..0f704484ae1d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -9548,7 +9548,7 @@ static ssize_t tpacpi_battery_store(int what, if (!battery_info.batteries[battery].start_support) return -ENODEV; /* valid values are [0, 99] */ - if (value < 0 || value > 99) + if (value > 99) return -EINVAL; if (value > battery_info.batteries[battery].charge_stop) return -EINVAL; diff --git a/drivers/platform/x86/xiaomi-wmi.c b/drivers/platform/x86/xiaomi-wmi.c index 601cbb282f54..54a2546bb93b 100644 --- a/drivers/platform/x86/xiaomi-wmi.c +++ b/drivers/platform/x86/xiaomi-wmi.c @@ -23,7 +23,7 @@ struct xiaomi_wmi { unsigned int key_code; }; -int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context) +static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context) { struct xiaomi_wmi *data; @@ -48,7 +48,7 @@ int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context) return input_register_device(data->input_dev); } -void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy) +static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy) { struct xiaomi_wmi *data; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c0ab6e7bc129..db8e069be3a0 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -7048,17 +7048,17 @@ int qeth_stop(struct net_device *dev) unsigned int i; /* Quiesce the NAPI instances: */ - qeth_for_each_output_queue(card, queue, i) { + qeth_for_each_output_queue(card, queue, i) napi_disable(&queue->napi); - del_timer_sync(&queue->timer); - } /* Stop .ndo_start_xmit, might still access queue->napi. */ netif_tx_disable(dev); - /* Queues may get re-allocated, so remove the NAPIs here. 
*/ - qeth_for_each_output_queue(card, queue, i) + qeth_for_each_output_queue(card, queue, i) { + del_timer_sync(&queue->timer); + /* Queues may get re-allocated, so remove the NAPIs. */ netif_napi_del(&queue->napi); + } } else { netif_tx_disable(dev); } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d190db5ea7d9..1d9a4866f9a7 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3732,6 +3732,13 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. + */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ha->flags.fw_started) @@ -3750,15 +3757,6 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_wait_for_sess_deletion(base_vha); - /* - * if UNLOAD flag is already set, then continue unload, - * where it was set first. - */ - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - return; - - set_bit(UNLOADING, &base_vha->dpc_flags); - qla_nvme_delete(base_vha); dma_free_coherent(&ha->pdev->dev, @@ -4864,6 +4862,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) struct qla_work_evt *e; uint8_t bail; + if (test_bit(UNLOADING, &vha->dpc_flags)) + return NULL; + QLA_VHA_MARK_BUSY(vha, bail); if (bail) return NULL; @@ -6628,13 +6629,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - /* - * if UNLOAD flag is already set, then continue unload, - * where it was set first. - */ - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - return; - ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); @@ -6645,9 +6639,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) return; } - qla2x00_wait_for_sess_deletion(base_vha); + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. 
+ */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; - set_bit(UNLOADING, &base_vha->dpc_flags); + qla2x00_wait_for_sess_deletion(base_vha); qla2x00_delete_all_vps(ha, base_vha); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 47835c4b4ee0..06c260f6cdae 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2284,6 +2284,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) switch (oldstate) { case SDEV_RUNNING: case SDEV_CREATED_BLOCK: + case SDEV_QUIESCE: case SDEV_OFFLINE: break; default: diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 08d1bbbebf2d..e84b4fb493d6 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2725,8 +2725,10 @@ static int comedi_open(struct inode *inode, struct file *file) } cfp = kzalloc(sizeof(*cfp), GFP_KERNEL); - if (!cfp) + if (!cfp) { + comedi_dev_put(dev); return -ENOMEM; + } cfp->dev = dev; diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c index 83026ba63d1c..78a7c1b3448a 100644 --- a/drivers/staging/comedi/drivers/dt2815.c +++ b/drivers/staging/comedi/drivers/dt2815.c @@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, int ret; for (i = 0; i < insn->n; i++) { + /* FIXME: lo bit 0 chooses voltage output or current output */ lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01; hi = (data[i] & 0xff0) >> 4; @@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, if (ret) return ret; + outb(hi, dev->iobase + DT2815_DATA); + devpriv->ao_readback[chan] = data[i]; } return i; diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c index a2d67c28f530..5f0e089573a2 100644 --- a/drivers/staging/gasket/gasket_sysfs.c +++ b/drivers/staging/gasket/gasket_sysfs.c @@ -228,8 +228,7 @@ int gasket_sysfs_create_entries(struct device *device, } mutex_lock(&mapping->mutex); - for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER); - i++) { + for (i = 0; attrs[i].attr.attr.name != NULL; i++) { if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) { dev_err(device, "Maximum number of sysfs nodes reached for device\n"); diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h index 1d0eed66a7f4..ab5aa351d555 100644 --- a/drivers/staging/gasket/gasket_sysfs.h +++ b/drivers/staging/gasket/gasket_sysfs.h @@ -30,10 +30,6 @@ */ #define GASKET_SYSFS_MAX_NODES 196 -/* End markers for sysfs struct arrays. */ -#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END -#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN) - /* * Terminator struct for a gasket_sysfs_attr array. Must be at the end of * all gasket_sysfs_attribute arrays. 
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index 41b73f9670e2..ac3b188984d0 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -83,9 +83,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, case VNT_KEY_PAIRWISE: key_mode |= mode; key_inx = 4; - /* Don't save entry for pairwise key for station mode */ - if (priv->op_mode == NL80211_IFTYPE_STATION) - clear_bit(entry, &priv->key_entry_inuse); break; default: return -EINVAL; @@ -109,7 +106,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_vif *vif, struct ieee80211_key_conf *key) { - struct ieee80211_bss_conf *conf = &vif->bss_conf; struct vnt_private *priv = hw->priv; u8 *mac_addr = NULL; u8 key_dec_mode = 0; @@ -154,16 +150,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, return -EOPNOTSUPP; } - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE, key_dec_mode, true); - } else { - vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY, + else + vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - vnt_set_keymode(hw, (u8 *)conf->bssid, key, - VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - } - return 0; } diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 8e7269c87ea9..5f78cad3b647 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -625,8 +625,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) priv->op_mode = vif->type; - vnt_set_bss_mode(priv); - /* LED blink on TX */ vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER); @@ -713,7 +711,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->basic_rates = conf->basic_rates; vnt_update_top_rates(priv); - vnt_set_bss_mode(priv); dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates); } @@ -742,11 +739,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->short_slot_time = false; vnt_set_short_slot_time(priv); - vnt_update_ifs(priv); vnt_set_vga_gain_offset(priv, priv->bb_vga[0]); vnt_update_pre_ed_threshold(priv, false); } + if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT)) + vnt_set_bss_mode(priv); + if (changed & BSS_CHANGED_TXPOWER) vnt_rf_setpower(priv, priv->current_rate, conf->chandef.chan->hw_value); @@ -770,12 +770,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); - vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, - conf->sync_tsf, priv->current_tsf); - vnt_mac_set_beacon_interval(priv, conf->beacon_int); vnt_reset_next_tbtt(priv, conf->beacon_int); + + vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, + conf->sync_tsf, priv->current_tsf); + + vnt_update_next_tbtt(priv, + conf->sync_tsf, conf->beacon_int); } else { vnt_clear_current_tsf(priv); @@ -809,15 +812,11 @@ static void vnt_configure(struct ieee80211_hw *hw, { struct vnt_private *priv = hw->priv; u8 rx_mode = 0; - int rc; *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC; - rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, - MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); - - if (!rc) - rx_mode = RCR_MULTICAST | RCR_BROADCAST; + vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, + MESSAGE_REQUEST_MACREG, sizeof(u8), 
&rx_mode); dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode); @@ -856,8 +855,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case SET_KEY: return vnt_set_keys(hw, sta, vif, key); case DISABLE_KEY: - if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) + if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) { clear_bit(key->hw_key_idx, &priv->key_entry_inuse); + + vnt_mac_disable_keyentry(priv, key->hw_key_idx); + } + default: break; } diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index eae211e5860f..91b62c3dff7b 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -207,7 +207,8 @@ static void vnt_int_process_data(struct vnt_private *priv) priv->wake_up_count = priv->hw->conf.listen_interval; - --priv->wake_up_count; + if (priv->wake_up_count) + --priv->wake_up_count; /* Turn on wake up to listen next beacon */ if (priv->wake_up_count == 1) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 51ffd5c002de..1c181d31f4c8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) target_to_linux_sector(dev, cmd->t_task_lba), target_to_linux_sector(dev, sbc_get_write_same_sectors(cmd)), - GFP_KERNEL, false); + GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); if (ret) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig index 31b7e1b03749..d1b27b0522a3 100644 --- a/drivers/tty/hvc/Kconfig +++ b/drivers/tty/hvc/Kconfig @@ -88,7 +88,7 @@ config HVC_DCC config HVC_RISCV_SBI bool "RISC-V SBI console support" - depends on RISCV_SBI + depends on RISCV_SBI_V01 select HVC_DRIVER help This enables support for console output via RISC-V SBI calls, which diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 27284a2dcd2b..436cc51c92c3 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) vtermnos[index] = vtermno; cons_ops[index] = ops; - /* reserve all indices up to and including this index */ - if (last_hvc < index) - last_hvc = index; - /* check if we need to re-register the kernel console */ hvc_check_console(index); @@ -960,13 +956,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, cons_ops[i] == hp->ops) break; - /* no matching slot, just use a counter */ - if (i >= MAX_NR_HVC_CONSOLES) - i = ++last_hvc; + if (i >= MAX_NR_HVC_CONSOLES) { + + /* find 'empty' slot for console */ + for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) { + } + + /* no matching slot, just use a counter */ + if (i == MAX_NR_HVC_CONSOLES) + i = ++last_hvc + MAX_NR_HVC_CONSOLES; + } hp->index = i; - cons_ops[i] = ops; - vtermnos[i] = vtermno; + if (i < MAX_NR_HVC_CONSOLES) { + cons_ops[i] = ops; + vtermnos[i] = vtermno; + } list_add_tail(&(hp->next), &hvc_structs); mutex_unlock(&hvc_structs_mutex); diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index fbaa4ec85560..e2138e7d5dc6 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev) tty_port_init(&info->port); info->port.ops = &rocket_port_ops; info->flags &= ~ROCKET_MODE_MASK; - switch (pc104[board][line]) { - case 422: - info->flags |= ROCKET_MODE_RS422; - break; - case 485: - info->flags |= 
ROCKET_MODE_RS485; - break; - case 232: - default: + if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1)) + switch (pc104[board][line]) { + case 422: + info->flags |= ROCKET_MODE_RS422; + break; + case 485: + info->flags |= ROCKET_MODE_RS485; + break; + case 232: + default: + info->flags |= ROCKET_MODE_RS232; + break; + } + else info->flags |= ROCKET_MODE_RS232; - break; - } info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR; if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) { diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 0aea76cd67ff..adf9e80e7dc9 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -86,7 +86,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST config SERIAL_EARLYCON_RISCV_SBI bool "Early console using RISC-V SBI" - depends on RISCV_SBI + depends on RISCV_SBI_V01 select SERIAL_CORE select SERIAL_CORE_CONSOLE select SERIAL_EARLYCON diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 5674da2b76f0..ed0aa5c0d9b7 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -843,8 +843,10 @@ static int bcm_uart_probe(struct platform_device *pdev) if (IS_ERR(clk) && pdev->dev.of_node) clk = of_clk_get(pdev->dev.of_node, 0); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + clk_put(clk); return -ENODEV; + } port->iotype = UPIO_MEM; port->irq = res_irq->start; diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index 42c8cc93b603..c149f8c30007 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -680,6 +680,12 @@ static int owl_uart_probe(struct platform_device *pdev) return PTR_ERR(owl_port->clk); } + ret = clk_prepare_enable(owl_port->clk); + if (ret) { + dev_err(&pdev->dev, "could not enable clk\n"); + return ret; + } + owl_port->port.dev = &pdev->dev; owl_port->port.line = pdev->id; owl_port->port.type = PORT_OWL; @@ -712,6 +718,7 @@ static int owl_uart_remove(struct platform_device *pdev) uart_remove_one_port(&owl_uart_driver, &owl_port->port); owl_uart_ports[pdev->id] = NULL; + clk_disable_unprepare(owl_port->clk); return 0; } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index c073aa7001c4..e1179e74a2b8 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -870,9 +870,16 @@ static void sci_receive_chars(struct uart_port *port) tty_insert_flip_char(tport, c, TTY_NORMAL); } else { for (i = 0; i < count; i++) { - char c = serial_port_in(port, SCxRDR); - - status = serial_port_in(port, SCxSR); + char c; + + if (port->type == PORT_SCIF || + port->type == PORT_HSCIF) { + status = serial_port_in(port, SCxSR); + c = serial_port_in(port, SCxRDR); + } else { + c = serial_port_in(port, SCxRDR); + status = serial_port_in(port, SCxSR); + } if (uart_handle_sysrq_char(port, c)) { count--; i--; continue; diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index eafada8fb6fa..e35073e93a5b 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -567,6 +567,9 @@ static int hv_probe(struct platform_device *op) sunserial_console_match(&sunhv_console, op->dev.of_node, &sunhv_reg, port->line, false); + /* We need to initialize lock even for non-registered console */ + spin_lock_init(&port->lock); + err = uart_add_one_port(&sunhv_reg, port); if (err) goto out_unregister_driver; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 6b26f767768e..ac137b6a1dc1 100644 --- 
a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -26,13 +26,15 @@ #define CDNS_UART_TTY_NAME "ttyPS" #define CDNS_UART_NAME "xuartps" +#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */ +#define CDNS_UART_MINOR 0 /* works best with devtmpfs */ +#define CDNS_UART_NR_PORTS 16 #define CDNS_UART_FIFO_SIZE 64 /* FIFO size */ #define CDNS_UART_REGISTER_SPACE 0x1000 #define TX_TIMEOUT 500000 /* Rx Trigger level */ static int rx_trigger_level = 56; -static int uartps_major; module_param(rx_trigger_level, uint, 0444); MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes"); @@ -188,7 +190,6 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255"); * @pclk: APB clock * @cdns_uart_driver: Pointer to UART driver * @baud: Current baud rate - * @id: Port ID * @clk_rate_change_nb: Notifier block for clock changes * @quirks: Flags for RXBS support. */ @@ -198,7 +199,6 @@ struct cdns_uart { struct clk *pclk; struct uart_driver *cdns_uart_driver; unsigned int baud; - int id; struct notifier_block clk_rate_change_nb; u32 quirks; bool cts_override; @@ -1133,6 +1133,8 @@ static const struct uart_ops cdns_uart_ops = { #endif }; +static struct uart_driver cdns_uart_uart_driver; + #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /** * cdns_uart_console_putchar - write the character to the FIFO buffer @@ -1272,6 +1274,16 @@ static int cdns_uart_console_setup(struct console *co, char *options) return uart_set_options(port, co, baud, parity, bits, flow); } + +static struct console cdns_uart_console = { + .name = CDNS_UART_TTY_NAME, + .write = cdns_uart_console_write, + .device = uart_console_device, + .setup = cdns_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, /* Specified on the cmdline (e.g. console=ttyPS ) */ + .data = &cdns_uart_uart_driver, +}; #endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */ #ifdef CONFIG_PM_SLEEP @@ -1403,89 +1415,8 @@ static const struct of_device_id cdns_uart_of_match[] = { }; MODULE_DEVICE_TABLE(of, cdns_uart_of_match); -/* - * Maximum number of instances without alias IDs but if there is alias - * which target "< MAX_UART_INSTANCES" range this ID can't be used. - */ -#define MAX_UART_INSTANCES 32 - -/* Stores static aliases list */ -static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES); -static int alias_bitmap_initialized; - -/* Stores actual bitmap of allocated IDs with alias IDs together */ -static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES); -/* Protect bitmap operations to have unique IDs */ -static DEFINE_MUTEX(bitmap_lock); - -static int cdns_get_id(struct platform_device *pdev) -{ - int id, ret; - - mutex_lock(&bitmap_lock); - - /* Alias list is stable that's why get alias bitmap only once */ - if (!alias_bitmap_initialized) { - ret = of_alias_get_alias_list(cdns_uart_of_match, "serial", - alias_bitmap, MAX_UART_INSTANCES); - if (ret && ret != -EOVERFLOW) { - mutex_unlock(&bitmap_lock); - return ret; - } - - alias_bitmap_initialized++; - } - - /* Make sure that alias ID is not taken by instance without alias */ - bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES); - - dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n", - MAX_UART_INSTANCES, bitmap); - - /* Look for a serialN alias */ - id = of_alias_get_id(pdev->dev.of_node, "serial"); - if (id < 0) { - dev_warn(&pdev->dev, - "No serial alias passed. Using the first free id\n"); - - /* - * Start with id 0 and check if there is no serial0 alias - * which points to device which is compatible with this driver. - * If alias exists then try next free position. 
- */ - id = 0; - - for (;;) { - dev_info(&pdev->dev, "Checking id %d\n", id); - id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id); - - /* No free empty instance */ - if (id == MAX_UART_INSTANCES) { - dev_err(&pdev->dev, "No free ID\n"); - mutex_unlock(&bitmap_lock); - return -EINVAL; - } - - dev_dbg(&pdev->dev, "The empty id is %d\n", id); - /* Check if ID is empty */ - if (!test_and_set_bit(id, bitmap)) { - /* Break the loop if bit is taken */ - dev_dbg(&pdev->dev, - "Selected ID %d allocation passed\n", - id); - break; - } - dev_dbg(&pdev->dev, - "Selected ID %d allocation failed\n", id); - /* if taking bit fails then try next one */ - id++; - } - } - - mutex_unlock(&bitmap_lock); - - return id; -} +/* Temporary variable for storing number of instances */ +static int instances; /** * cdns_uart_probe - Platform driver probe @@ -1495,16 +1426,11 @@ static int cdns_get_id(struct platform_device *pdev) */ static int cdns_uart_probe(struct platform_device *pdev) { - int rc, irq; + int rc, id, irq; struct uart_port *port; struct resource *res; struct cdns_uart *cdns_uart_data; const struct of_device_id *match; - struct uart_driver *cdns_uart_uart_driver; - char *driver_name; -#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE - struct console *cdns_uart_console; -#endif cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), GFP_KERNEL); @@ -1514,64 +1440,35 @@ static int cdns_uart_probe(struct platform_device *pdev) if (!port) return -ENOMEM; - cdns_uart_uart_driver = devm_kzalloc(&pdev->dev, - sizeof(*cdns_uart_uart_driver), - GFP_KERNEL); - if (!cdns_uart_uart_driver) - return -ENOMEM; - - cdns_uart_data->id = cdns_get_id(pdev); - if (cdns_uart_data->id < 0) - return cdns_uart_data->id; + /* Look for a serialN alias */ + id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (id < 0) + id = 0; - /* There is a need to use unique driver name */ - driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d", - CDNS_UART_NAME, cdns_uart_data->id); - if (!driver_name) { - rc = -ENOMEM; - goto err_out_id; + if (id >= CDNS_UART_NR_PORTS) { + dev_err(&pdev->dev, "Cannot get uart_port structure\n"); + return -ENODEV; } - cdns_uart_uart_driver->owner = THIS_MODULE; - cdns_uart_uart_driver->driver_name = driver_name; - cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME; - cdns_uart_uart_driver->major = uartps_major; - cdns_uart_uart_driver->minor = cdns_uart_data->id; - cdns_uart_uart_driver->nr = 1; - + if (!cdns_uart_uart_driver.state) { + cdns_uart_uart_driver.owner = THIS_MODULE; + cdns_uart_uart_driver.driver_name = CDNS_UART_NAME; + cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME; + cdns_uart_uart_driver.major = CDNS_UART_MAJOR; + cdns_uart_uart_driver.minor = CDNS_UART_MINOR; + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE - cdns_uart_console = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_console), - GFP_KERNEL); - if (!cdns_uart_console) { - rc = -ENOMEM; - goto err_out_id; - } - - strncpy(cdns_uart_console->name, CDNS_UART_TTY_NAME, - sizeof(cdns_uart_console->name)); - cdns_uart_console->index = cdns_uart_data->id; - cdns_uart_console->write = cdns_uart_console_write; - cdns_uart_console->device = uart_console_device; - cdns_uart_console->setup = cdns_uart_console_setup; - cdns_uart_console->flags = CON_PRINTBUFFER; - cdns_uart_console->data = cdns_uart_uart_driver; - cdns_uart_uart_driver->cons = cdns_uart_console; + cdns_uart_uart_driver.cons = &cdns_uart_console; #endif - rc = uart_register_driver(cdns_uart_uart_driver); - if (rc 
< 0) { - dev_err(&pdev->dev, "Failed to register driver\n"); - goto err_out_id; + rc = uart_register_driver(&cdns_uart_uart_driver); + if (rc < 0) { + dev_err(&pdev->dev, "Failed to register driver\n"); + return rc; + } } - cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver; - - /* - * Setting up proper name_base needs to be done after uart - * registration because tty_driver structure is not filled. - * name_base is 0 by default. - */ - cdns_uart_uart_driver->tty_driver->name_base = cdns_uart_data->id; + cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver; match = of_match_node(cdns_uart_of_match, pdev->dev.of_node); if (match && match->data) { @@ -1649,6 +1546,7 @@ static int cdns_uart_probe(struct platform_device *pdev) port->ops = &cdns_uart_ops; port->fifosize = CDNS_UART_FIFO_SIZE; port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE); + port->line = id; /* * Register the port. @@ -1680,7 +1578,7 @@ static int cdns_uart_probe(struct platform_device *pdev) console_port = port; #endif - rc = uart_add_one_port(cdns_uart_uart_driver, port); + rc = uart_add_one_port(&cdns_uart_uart_driver, port); if (rc) { dev_err(&pdev->dev, "uart_add_one_port() failed; err=%i\n", rc); @@ -1690,13 +1588,15 @@ static int cdns_uart_probe(struct platform_device *pdev) #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /* This is not port which is used for console that's why clean it up */ if (console_port == port && - !(cdns_uart_uart_driver->cons->flags & CON_ENABLED)) + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) console_port = NULL; #endif - uartps_major = cdns_uart_uart_driver->tty_driver->major; cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, "cts-override"); + + instances++; + return 0; err_out_pm_disable: @@ -1712,12 +1612,8 @@ err_out_clk_disable: err_out_clk_dis_pclk: clk_disable_unprepare(cdns_uart_data->pclk); err_out_unregister_driver: - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); -err_out_id: - mutex_lock(&bitmap_lock); - if (cdns_uart_data->id < MAX_UART_INSTANCES) - clear_bit(cdns_uart_data->id, bitmap); - mutex_unlock(&bitmap_lock); + if (!instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); return rc; } @@ -1740,10 +1636,6 @@ static int cdns_uart_remove(struct platform_device *pdev) #endif rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port); port->mapbase = 0; - mutex_lock(&bitmap_lock); - if (cdns_uart_data->id < MAX_UART_INSTANCES) - clear_bit(cdns_uart_data->id, bitmap); - mutex_unlock(&bitmap_lock); clk_disable_unprepare(cdns_uart_data->uartclk); clk_disable_unprepare(cdns_uart_data->pclk); pm_runtime_disable(&pdev->dev); @@ -1756,13 +1648,8 @@ static int cdns_uart_remove(struct platform_device *pdev) console_port = NULL; #endif - /* If this is last instance major number should be initialized */ - mutex_lock(&bitmap_lock); - if (bitmap_empty(bitmap, MAX_UART_INSTANCES)) - uartps_major = 0; - mutex_unlock(&bitmap_lock); - - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); + if (!--instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); return rc; } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 5e0d0813da55..0dc3878794fd 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -74,6 +74,7 @@ int sysrq_mask(void) return 1; return sysrq_enabled; } +EXPORT_SYMBOL_GPL(sysrq_mask); /* * A value of 1 means 'all', other nonzero values are an op mask: @@ -1058,6 +1059,7 @@ int sysrq_toggle_support(int enable_mask) return 0; } 
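
The xilinx_uartps rework above drops the per-device uart_driver allocation and the alias ID bitmap in favour of one static driver that is registered on the first successful probe and unregistered when the last port goes away. A minimal sketch of that register-once pattern follows; the names my_uart_driver, my_probe, my_remove, "myuart" and "ttyMY" are illustrative only and are not taken from the patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>

static struct uart_driver my_uart_driver;	/* filled in lazily on first probe */
static int instances;				/* ports currently registered */

static int my_probe(struct platform_device *pdev)
{
	int rc;

	/* uart_register_driver() allocates ->state, so it doubles as a "registered" flag */
	if (!my_uart_driver.state) {
		my_uart_driver.owner = THIS_MODULE;
		my_uart_driver.driver_name = "myuart";
		my_uart_driver.dev_name = "ttyMY";
		my_uart_driver.nr = 16;		/* fixed upper bound on ports */
		rc = uart_register_driver(&my_uart_driver);
		if (rc)
			return rc;
	}

	/* ... set up the uart_port and uart_add_one_port() it here ... */

	instances++;
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	/* ... uart_remove_one_port() the port here ... */

	if (!--instances)			/* last port gone, tear the driver down */
		uart_unregister_driver(&my_uart_driver);
	return 0;
}

Leaving major/minor at 0, as the patch does with CDNS_UART_MAJOR/CDNS_UART_MINOR, lets the TTY core pick numbers dynamically, which is what removed the need to remember uartps_major between probes.
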
+EXPORT_SYMBOL_GPL(sysrq_toggle_support); static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p, struct sysrq_key_op *remove_op_p) diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 309a39197be0..e5ffed795e4c 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -81,6 +81,7 @@ #include <linux/errno.h> #include <linux/kd.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <linux/major.h> #include <linux/mm.h> #include <linux/console.h> @@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) /* allocate everything in one go */ memsize = cols * rows * sizeof(char32_t); memsize += rows * sizeof(char32_t *); - p = kmalloc(memsize, GFP_KERNEL); + p = vmalloc(memsize); if (!p) return NULL; @@ -366,7 +367,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) { - kfree(vc->vc_uni_screen); + vfree(vc->vc_uni_screen); vc->vc_uni_screen = new_uniscr; } @@ -1206,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > (4 << 20)) + if (new_screen_size > KMALLOC_MAX_SIZE) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 84d6f7df09a4..ded8d93834ca 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -412,9 +412,12 @@ static void acm_ctrl_irq(struct urb *urb) exit: retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval && retval != -EPERM) + if (retval && retval != -EPERM && retval != -ENODEV) dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n", __func__, retval); + else + dev_vdbg(&acm->control->dev, + "control resubmission terminated %d\n", retval); } static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) @@ -430,6 +433,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) dev_err(&acm->data->dev, "urb %d failed submission with %d\n", index, res); + } else { + dev_vdbg(&acm->data->dev, "intended failure %d\n", res); } set_bit(index, &acm->read_urbs_free); return res; @@ -471,6 +476,7 @@ static void acm_read_bulk_callback(struct urb *urb) int status = urb->status; bool stopped = false; bool stalled = false; + bool cooldown = false; dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", rb->index, urb->actual_length, status); @@ -497,6 +503,14 @@ static void acm_read_bulk_callback(struct urb *urb) __func__, status); stopped = true; break; + case -EOVERFLOW: + case -EPROTO: + dev_dbg(&acm->data->dev, + "%s - cooling babbling device\n", __func__); + usb_mark_last_busy(acm->dev); + set_bit(rb->index, &acm->urbs_in_error_delay); + cooldown = true; + break; default: dev_dbg(&acm->data->dev, "%s - nonzero urb status received: %d\n", @@ -518,9 +532,11 @@ static void acm_read_bulk_callback(struct urb *urb) */ smp_mb__after_atomic(); - if (stopped || stalled) { + if (stopped || stalled || cooldown) { if (stalled) schedule_work(&acm->work); + else if (cooldown) + schedule_delayed_work(&acm->dwork, HZ / 2); return; } @@ -557,14 +573,20 @@ static void acm_softint(struct work_struct *work) struct acm *acm = container_of(work, struct acm, work); if (test_bit(EVENT_RX_STALL, &acm->flags)) { - if (!(usb_autopm_get_interface(acm->data))) { + smp_mb(); /* against acm_suspend() */ + if (!acm->susp_count) { for (i = 0; i 
< acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); usb_clear_halt(acm->dev, acm->in); acm_submit_read_urbs(acm, GFP_KERNEL); - usb_autopm_put_interface(acm->data); + clear_bit(EVENT_RX_STALL, &acm->flags); } - clear_bit(EVENT_RX_STALL, &acm->flags); + } + + if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { + for (i = 0; i < ACM_NR; i++) + if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) + acm_submit_read_urb(acm, i, GFP_NOIO); } if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) @@ -1333,6 +1355,7 @@ made_compressed_probe: acm->readsize = readsize; acm->rx_buflimit = num_rx_buf; INIT_WORK(&acm->work, acm_softint); + INIT_DELAYED_WORK(&acm->dwork, acm_softint); init_waitqueue_head(&acm->wioctl); spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); @@ -1542,6 +1565,7 @@ static void acm_disconnect(struct usb_interface *intf) acm_kill_urbs(acm); cancel_work_sync(&acm->work); + cancel_delayed_work_sync(&acm->dwork); tty_unregister_device(acm_tty_driver, acm->minor); @@ -1584,6 +1608,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) acm_kill_urbs(acm); cancel_work_sync(&acm->work); + cancel_delayed_work_sync(&acm->dwork); + acm->urbs_in_error_delay = 0; return 0; } diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index ca1c026382c2..cd5e9d8ab237 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -109,8 +109,11 @@ struct acm { # define EVENT_TTY_WAKEUP 0 # define EVENT_RX_STALL 1 # define ACM_THROTTLED 2 +# define ACM_ERROR_DELAY 3 + unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */ struct usb_cdc_line_coding line; /* bits, stop, parity */ - struct work_struct work; /* work queue entry for line discipline waking up */ + struct work_struct work; /* work queue entry for various purposes*/ + struct delayed_work dwork; /* for cool downs needed in error recovery */ unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ unsigned int ctrlout; /* output control lines (DTR, RTS) */ struct async_icount iocount; /* counters for control line changes */ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 54cd8ef795ec..2b6565c06c23 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1223,6 +1223,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) #ifdef CONFIG_PM udev->reset_resume = 1; #endif + /* Don't set the change_bits when the device + * was powered off. + */ + if (test_bit(port1, hub->power_bits)) + set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell hub_wq */ @@ -2723,13 +2728,11 @@ static bool use_new_scheme(struct usb_device *udev, int retry, { int old_scheme_first_port = port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME; - int quick_enumeration = (udev->speed == USB_SPEED_HIGH); if (udev->speed >= USB_SPEED_SUPER) return false; - return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first - || quick_enumeration); + return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first); } /* Is a USB 3.0 port in the Inactive or Compliance Mode state? @@ -3088,6 +3091,15 @@ static int check_port_resume_type(struct usb_device *udev, if (portchange & USB_PORT_STAT_C_ENABLE) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_ENABLE); + + /* + * Whatever made this reset-resume necessary may have + * turned on the port1 bit in hub->change_bits. 
But after + * a successful reset-resume we want the bit to be clear; + * if it was on it would indicate that something happened + * following the reset-resume. + */ + clear_bit(port1, hub->change_bits); } return status; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index d5f834f16993..a48678a0c83a 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -589,12 +589,13 @@ void usb_sg_cancel(struct usb_sg_request *io) int i, retval; spin_lock_irqsave(&io->lock, flags); - if (io->status) { + if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; + io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { @@ -608,6 +609,12 @@ void usb_sg_cancel(struct usb_sg_request *io) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } + + spin_lock_irqsave(&io->lock, flags); + io->count--; + if (!io->count) + complete(&io->complete); + spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index da30b5664ff3..3e8efe759c3e 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -430,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair K70 LUX */ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 RGB RAPDIFIRE */ + { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 6846eb0cba13..4c171a8e215f 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -307,10 +307,14 @@ /* Global TX Fifo Size Register */ #define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */ -#define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */ -#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) +#define DWC31_GTXFIFOSIZ_TXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ +#define DWC3_GTXFIFOSIZ_TXFDEP(n) ((n) & 0xffff) #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) +/* Global RX Fifo Size Register */ +#define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ +#define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff) + /* Global Event Size Registers */ #define DWC3_GEVNTSIZ_INTMASK BIT(31) #define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4d3c79d90a6e..00746c2848c0 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1728,7 +1728,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) u32 reg; u8 link_state; - u8 speed; /* * According to the Databook Remote wakeup request should @@ -1738,16 +1737,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) */ reg = dwc3_readl(dwc->regs, DWC3_DSTS); - speed = reg & DWC3_DSTS_CONNECTSPD; - if ((speed == DWC3_DSTS_SUPERSPEED) || - (speed == DWC3_DSTS_SUPERSPEED_PLUS)) - return 0; - link_state = DWC3_DSTS_USBLNKST(reg); switch (link_state) { + case DWC3_LINK_STATE_RESET: case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ + case DWC3_LINK_STATE_RESUME: break; default: return -EINVAL; @@ -2227,7 +2223,6 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; 
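
The usb_sg_cancel() change further up closes a teardown race by treating io->count as a hold count: cancellation bails out if the request has already drained, takes an extra count while the individual URBs are unlinked, and signals the completion itself if that extra count turns out to be the last one. A sketch of the same pattern on a hypothetical request object; my_req and my_req_cancel are illustrative names, not USB core API.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct my_req {
	spinlock_t		lock;
	int			count;	/* in-flight sub-operations + temporary holders */
	int			status;
	struct completion	done;
};

static void my_req_cancel(struct my_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->status || req->count == 0) {
		/* already failing, or already completed: nothing to cancel */
		spin_unlock_irqrestore(&req->lock, flags);
		return;
	}
	req->status = -ECONNRESET;
	req->count++;			/* keep the request alive while we cancel */
	spin_unlock_irqrestore(&req->lock, flags);

	/* ... abort/unlink the individual sub-operations here ... */

	spin_lock_irqsave(&req->lock, flags);
	if (!--req->count)		/* we were the last holder */
		complete(&req->done);
	spin_unlock_irqrestore(&req->lock, flags);
}
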
int mdwidth; - int kbytes; int size; mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); @@ -2236,24 +2231,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1)); if (dwc3_is_usb31(dwc)) - size = DWC31_GTXFIFOSIZ_TXFDEF(size); + size = DWC31_GTXFIFOSIZ_TXFDEP(size); else - size = DWC3_GTXFIFOSIZ_TXFDEF(size); + size = DWC3_GTXFIFOSIZ_TXFDEP(size); /* FIFO Depth is in MDWDITH bytes. Multiply */ size *= mdwidth; - kbytes = size / 1024; - if (kbytes == 0) - kbytes = 1; - /* - * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for - * internal overhead. We don't really know how these are used, - * but documentation say it exists. + * To meet performance requirement, a minimum TxFIFO size of 3x + * MaxPacketSize is recommended for endpoints that support burst and a + * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't + * support burst. Use those numbers and we can calculate the max packet + * limit as below. */ - size -= mdwidth * (kbytes + 1); - size /= kbytes; + if (dwc->maximum_speed >= USB_SPEED_SUPER) + size /= 3; + else + size /= 2; usb_ep_set_maxpacket_limit(&dep->endpoint, size); @@ -2271,8 +2266,39 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; + int mdwidth; + int size; + + mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); - usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); + /* MDWIDTH is represented in bits, convert to bytes */ + mdwidth /= 8; + + /* All OUT endpoints share a single RxFIFO space */ + size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0)); + if (dwc3_is_usb31(dwc)) + size = DWC31_GRXFIFOSIZ_RXFDEP(size); + else + size = DWC3_GRXFIFOSIZ_RXFDEP(size); + + /* FIFO depth is in MDWDITH bytes */ + size *= mdwidth; + + /* + * To meet performance requirement, a minimum recommended RxFIFO size + * is defined as follow: + * RxFIFO size >= (3 x MaxPacketSize) + + * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin) + * + * Then calculate the max packet limit as below. + */ + size -= (3 * 8) + 16; + if (size < 0) + size = 0; + else + size /= 3; + + usb_ep_set_maxpacket_limit(&dep->endpoint, size); dep->endpoint.max_streams = 15; dep->endpoint.ops = &dwc3_gadget_ep_ops; list_add_tail(&dep->endpoint.ep_list, @@ -2484,14 +2510,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { - /* - * For OUT direction, host may send less than the setup - * length. Return true for all OUT requests. 
- */ - if (!req->direction) - return true; - - return req->request.actual == req->request.length; + return req->num_pending_sgs == 0; } static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, @@ -2515,8 +2534,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, req->request.actual = req->request.length - req->remaining; - if (!dwc3_gadget_ep_request_completed(req) || - req->num_pending_sgs) { + if (!dwc3_gadget_ep_request_completed(req)) { __dwc3_gadget_kick_transfer(dep); goto out; } diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index 971c6b92484a..171280c80228 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -728,19 +728,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb) case COMP_USB_TRANSACTION_ERROR: case COMP_STALL_ERROR: default: - if (ep_id == XDBC_EPID_OUT) + if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) xdbc.flags |= XDBC_FLAGS_OUT_STALL; - if (ep_id == XDBC_EPID_IN) + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) xdbc.flags |= XDBC_FLAGS_IN_STALL; xdbc_trace("endpoint %d stalled\n", ep_id); break; } - if (ep_id == XDBC_EPID_IN) { + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) { xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS; xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); - } else if (ep_id == XDBC_EPID_OUT) { + } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) { xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS; } else { xdbc_trace("invalid endpoint id %d\n", ep_id); diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h index 673686eeddd7..6e2b7266a695 100644 --- a/drivers/usb/early/xhci-dbc.h +++ b/drivers/usb/early/xhci-dbc.h @@ -120,8 +120,22 @@ struct xdbc_ring { u32 cycle_state; }; -#define XDBC_EPID_OUT 2 -#define XDBC_EPID_IN 3 +/* + * These are the "Endpoint ID" (also known as "Context Index") values for the + * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data + * structure. + * According to the "eXtensible Host Controller Interface for Universal Serial + * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer + * Rings", these should be 0 and 1, and those are the values AMD machines give + * you; but Intel machines seem to use the formula from section "4.5.1 Device + * Context Index", which is supposed to be used for the Device Context only. + * Luckily the values from Intel don't overlap with those from AMD, so we can + * just test for both. 
+ */ +#define XDBC_EPID_OUT 0 +#define XDBC_EPID_IN 1 +#define XDBC_EPID_OUT_INTEL 2 +#define XDBC_EPID_IN_INTEL 3 struct xdbc_state { u16 vendor; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index c81023b195c3..10f01f974f67 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1813,6 +1813,10 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs->state = FFS_READ_DESCRIPTORS; ffs->setup_state = FFS_NO_SETUP; ffs->flags = 0; + + ffs->ms_os_descs_ext_prop_count = 0; + ffs->ms_os_descs_ext_prop_name_len = 0; + ffs->ms_os_descs_ext_prop_data_len = 0; } diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 76406343fbe5..ca7d95bf7397 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -81,6 +81,7 @@ static int raw_event_queue_add(struct raw_event_queue *queue, static struct usb_raw_event *raw_event_queue_fetch( struct raw_event_queue *queue) { + int ret; unsigned long flags; struct usb_raw_event *event; @@ -89,11 +90,18 @@ static struct usb_raw_event *raw_event_queue_fetch( * there's at least one event queued by decrementing the semaphore, * and then take the lock to protect queue struct fields. */ - if (down_interruptible(&queue->sema)) - return NULL; + ret = down_interruptible(&queue->sema); + if (ret) + return ERR_PTR(ret); spin_lock_irqsave(&queue->lock, flags); - if (WARN_ON(!queue->size)) - return NULL; + /* + * queue->size must have the same value as queue->sema counter (before + * the down_interruptible() call above), so this check is a fail-safe. + */ + if (WARN_ON(!queue->size)) { + spin_unlock_irqrestore(&queue->lock, flags); + return ERR_PTR(-ENODEV); + } event = queue->events[0]; queue->size--; memmove(&queue->events[0], &queue->events[1], @@ -392,9 +400,8 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) char *udc_device_name; unsigned long flags; - ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); - if (ret) - return ret; + if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) + return -EFAULT; switch (arg.speed) { case USB_SPEED_UNKNOWN: @@ -501,15 +508,13 @@ out_unlock: static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) { - int ret = 0; struct usb_raw_event arg; unsigned long flags; struct usb_raw_event *event; uint32_t length; - ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); - if (ret) - return ret; + if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) + return -EFAULT; spin_lock_irqsave(&dev->lock, flags); if (dev->state != STATE_DEV_RUNNING) { @@ -525,25 +530,31 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) spin_unlock_irqrestore(&dev->lock, flags); event = raw_event_queue_fetch(&dev->queue); - if (!event) { + if (PTR_ERR(event) == -EINTR) { dev_dbg(&dev->gadget->dev, "event fetching interrupted\n"); return -EINTR; } + if (IS_ERR(event)) { + dev_err(&dev->gadget->dev, "failed to fetch event\n"); + spin_lock_irqsave(&dev->lock, flags); + dev->state = STATE_DEV_FAILED; + spin_unlock_irqrestore(&dev->lock, flags); + return -ENODEV; + } length = min(arg.length, event->length); - ret = copy_to_user((void __user *)value, event, - sizeof(*event) + length); - return ret; + if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) + return -EFAULT; + + return 0; } static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, bool get_from_user) { - int ret; void 
*data; - ret = copy_from_user(io, ptr, sizeof(*io)); - if (ret) - return ERR_PTR(ret); + if (copy_from_user(io, ptr, sizeof(*io))) + return ERR_PTR(-EFAULT); if (io->ep >= USB_RAW_MAX_ENDPOINTS) return ERR_PTR(-EINVAL); if (!usb_raw_io_flags_valid(io->flags)) @@ -658,12 +669,13 @@ static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value) if (IS_ERR(data)) return PTR_ERR(data); ret = raw_process_ep0_io(dev, &io, data, false); - if (ret < 0) { - kfree(data); - return ret; - } + if (ret) + goto free; + length = min(io.length, (unsigned int)ret); - ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); + if (copy_to_user((void __user *)(value + sizeof(io)), data, length)) + ret = -EFAULT; +free: kfree(data); return ret; } @@ -952,12 +964,13 @@ static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value) if (IS_ERR(data)) return PTR_ERR(data); ret = raw_process_ep_io(dev, &io, data, false); - if (ret < 0) { - kfree(data); - return ret; - } + if (ret) + goto free; + length = min(io.length, (unsigned int)ret); - ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); + if (copy_to_user((void __user *)(value + sizeof(io)), data, length)) + ret = -EFAULT; +free: kfree(data); return ret; } diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 6e0432141c40..22200341c8ec 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1951,10 +1951,10 @@ static irqreturn_t usba_vbus_irq_thread(int irq, void *devid) usba_start(udc); } else { udc->suspended = false; - usba_stop(udc); - if (udc->driver->disconnect) udc->driver->disconnect(&udc->gadget); + + usba_stop(udc); } udc->vbus_prev = vbus; } diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index a4d9b5e1e50e..d49c6dc1082d 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req, { struct bdc *bdc = ep->bdc; - if (req == NULL || &req->queue == NULL || &req->usb_req == NULL) + if (req == NULL) return; dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 9eca1fe81061..f37316d2c8fa 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1571,6 +1571,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) } if ((temp & PORT_RC)) reset_change = true; + if (temp & PORT_OC) + status = 1; } if (!status && !reset_change) { xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); @@ -1636,6 +1638,13 @@ retry: port_index); goto retry; } + /* bail out if port detected a over-current condition */ + if (t1 & PORT_OC) { + bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n"); + return -EBUSY; + } /* suspend ports in U0, or bail out for new connect changes */ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { if ((t1 & PORT_CSC) && wake_enabled) { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a78787bb5133..0fda0c0f4d31 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -547,6 +547,23 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, stream_id); return; } + /* + * A cancelled TD can complete with a stall if HW cached the trb. 
+ * In this case driver can't find cur_td, but if the ring is empty we + * can move the dequeue pointer to the current enqueue position. + */ + if (!cur_td) { + if (list_empty(&ep_ring->td_list)) { + state->new_deq_seg = ep_ring->enq_seg; + state->new_deq_ptr = ep_ring->enqueue; + state->new_cycle_state = ep_ring->cycle_state; + goto done; + } else { + xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n"); + return; + } + } + /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Finding endpoint context"); @@ -592,6 +609,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = new_seg; state->new_deq_ptr = new_deq; +done: /* Don't update the ring cycle state for the producer (us). */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Cycle state = 0x%x", state->new_cycle_state); @@ -1856,8 +1874,8 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, if (reset_type == EP_HARD_RESET) { ep->ep_state |= EP_HARD_CLEAR_TOGGLE; - xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td); - xhci_clear_hub_tt_buffer(xhci, td, ep); + xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id, + td); } xhci_ring_cmd_db(xhci); } @@ -1978,11 +1996,18 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { - /* Issue a reset endpoint command to clear the host side - * halt, followed by a set dequeue command to move the - * dequeue pointer past the TD. - * The class driver clears the device side halt later. + /* + * xhci internal endpoint state will go to a "halt" state for + * any stall, including default control pipe protocol stall. + * To clear the host side halt we need to issue a reset endpoint + * command, followed by a set dequeue command to move past the + * TD. + * Class drivers clear the device side halt from a functional + * stall later. Hub TT buffer should only be cleared for FS/LS + * devices behind HS hubs for functional stalls. */ + if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) + xhci_clear_hub_tt_buffer(xhci, td, ep); xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, ep_ring->stream_id, td, EP_HARD_RESET); } else { @@ -2539,6 +2564,15 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_dbg(xhci, "td_list is empty while skip flag set. 
Clear skip flag for slot %u ep %u.\n", slot_id, ep_index); } + if (trb_comp_code == COMP_STALL_ERROR || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + xhci_cleanup_halted_endpoint(xhci, slot_id, + ep_index, + ep_ring->stream_id, + NULL, + EP_HARD_RESET); + } goto cleanup; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index fe38275363e0..bee5deccc83d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3031,19 +3031,19 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, added_ctxs, added_ctxs); } -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td) +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td) { struct xhci_dequeue_state deq_state; - struct usb_device *udev = td->urb->dev; xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Cleaning up stalled endpoint ring"); /* We need to move the HW's dequeue pointer past this TD, * or it will attempt to resend it on the next doorbell ring. */ - xhci_find_new_dequeue_state(xhci, udev->slot_id, - ep_index, stream_id, td, &deq_state); + xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td, + &deq_state); if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) return; @@ -3054,7 +3054,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Queueing new dequeue state"); - xhci_queue_new_dequeue_state(xhci, udev->slot_id, + xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, &deq_state); } else { /* Better hope no one uses the input context between now and the @@ -3065,7 +3065,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Setting up input context for " "configure endpoint command"); - xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, + xhci_setup_input_ctx_for_quirk(xhci, slot_id, ep_index, &deq_state); } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 3289bb516201..86cfefdd6632 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -2116,8 +2116,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td); +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td); void xhci_stop_endpoint_command_watchdog(struct timer_list *t); void xhci_handle_command_timeout(struct work_struct *work); diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 2ab9600d0898..fc8a5da4a07c 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, /* High level: Gfx (indexed) register access */ #ifdef CONFIG_USB_SISUSBVGA_CON -int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data) +int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data) { return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } -int sisusb_getreg(struct sisusb_usb_data 
*sisusb, int port, u8 *data) +int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data) { return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } #endif -int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data) { int ret; @@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 *data) { int ret; @@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, +int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor) { int ret; @@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, } static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, - int port, u8 idx, u8 data, u8 mask) + u32 port, u8 idx, u8 data, u8 mask) { int ret; u8 tmp; @@ -1258,13 +1258,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, return ret; } -int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor) { return sisusb_setidxregandor(sisusb, port, index, 0xff, myor); } -int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand) { return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00); @@ -2785,8 +2785,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig) static int sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y, unsigned long arg) { - int retval, port, length; - u32 address; + int retval, length; + u32 port, address; /* All our commands require the device * to be initialized. 
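
The raw_gadget ioctl fixes earlier in this diff all correct the same mistake: copy_from_user()/copy_to_user() return the number of bytes that could not be copied, not an errno, so a nonzero result must be converted to -EFAULT rather than handed back to user space. A minimal sketch of the corrected copy-in/copy-out shape; my_ioctl_setup and my_ioctl_arg are hypothetical names for illustration.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_ioctl_arg {
	__u32 length;
	__u32 flags;
};

static int my_ioctl_setup(void __user *uptr)
{
	struct my_ioctl_arg arg;

	/*
	 * copy_from_user() returns the number of bytes left uncopied;
	 * returning that value directly would leak a positive "error"
	 * to user space, so map any nonzero result to -EFAULT.
	 */
	if (copy_from_user(&arg, uptr, sizeof(arg)))
		return -EFAULT;

	/* ... validate and act on arg ... */

	if (copy_to_user(uptr, &arg, sizeof(arg)))
		return -EFAULT;

	return 0;
}

The same reasoning applies to the raw_ioctl_event_fetch() and ep read paths in the patch, where a partial copy previously surfaced as a positive return value instead of -EFAULT.
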
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h index 1782c759c4ad..ace09985dae4 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_init.h +++ b/drivers/usb/misc/sisusbvga/sisusb_init.h @@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = { int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo); -extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data); -extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data); -extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data); +extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data); +extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data); -extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 * data); -extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor); -extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor); -extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand); void sisusb_delete(struct kref *kref); diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 3670fda02c34..d592071119ba 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo); static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, int status); +/* + * This driver needs its own workqueue, as we need to control memory allocation. + * + * In the course of error handling and power management uas_wait_for_pending_cmnds() + * needs to flush pending work items. In these contexts we cannot allocate memory + * by doing block IO as we would deadlock. For the same reason we cannot wait + * for anything allocating memory not heeding these constraints. + * + * So we have to control all work items that can be on the workqueue we flush. + * Hence we cannot share a queue and need our own. 
+ */ +static struct workqueue_struct *workqueue; + static void uas_do_work(struct work_struct *work) { struct uas_dev_info *devinfo = @@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work) if (!err) cmdinfo->state &= ~IS_IN_WORK_LIST; else - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } out: spin_unlock_irqrestore(&devinfo->lock, flags); @@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo) lockdep_assert_held(&devinfo->lock); cmdinfo->state |= IS_IN_WORK_LIST; - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } static void uas_zap_pending(struct uas_dev_info *devinfo, int result) @@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, struct uas_cmd_info *ci = (void *)&cmnd->SCp; struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; + if (status == -ENODEV) /* too late */ + return; + scmd_printk(KERN_INFO, cmnd, "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ", prefix, status, cmdinfo->uas_tag, @@ -1226,7 +1242,31 @@ static struct usb_driver uas_driver = { .id_table = uas_usb_ids, }; -module_usb_driver(uas_driver); +static int __init uas_init(void) +{ + int rv; + + workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0); + if (!workqueue) + return -ENOMEM; + + rv = usb_register(&uas_driver); + if (rv) { + destroy_workqueue(workqueue); + return -ENOMEM; + } + + return 0; +} + +static void __exit uas_exit(void) +{ + usb_deregister(&uas_driver); + destroy_workqueue(workqueue); +} + +module_init(uas_init); +module_exit(uas_exit); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(USB_STORAGE); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1880f3e13f57..f6c3681fa2e9 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2323,6 +2323,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000, USB_SC_DEVICE,USB_PR_DEVICE,NULL, US_FL_MAX_SECTORS_64 ), +/* Reported by Cyril Roelandt <tipecaml@gmail.com> */ +UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, "iRiver", diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index c823122f9cb7..e8ddb81cb6df 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -198,7 +198,10 @@ EXPORT_SYMBOL_GPL(typec_altmode_vdm); const struct typec_altmode * typec_altmode_get_partner(struct typec_altmode *adev) { - return adev ? 
&to_altmode(adev)->partner->adev : NULL; + if (!adev || !to_altmode(adev)->partner) + return NULL; + + return &to_altmode(adev)->partner->adev; } EXPORT_SYMBOL_GPL(typec_altmode_get_partner); diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c index 46457c133d2b..7afe275b17d0 100644 --- a/drivers/usb/typec/mux/pi3usb30532.c +++ b/drivers/usb/typec/mux/pi3usb30532.c @@ -114,8 +114,8 @@ pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state) static int pi3usb30532_probe(struct i2c_client *client) { struct device *dev = &client->dev; - struct typec_switch_desc sw_desc; - struct typec_mux_desc mux_desc; + struct typec_switch_desc sw_desc = { }; + struct typec_mux_desc mux_desc = { }; struct pi3usb30532 *pi; int ret; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index de3576e6530a..82b19ebd7838 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3794,6 +3794,14 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, */ break; + case PORT_RESET: + case PORT_RESET_WAIT_OFF: + /* + * State set back to default mode once the timer completes. + * Ignore CC changes here. + */ + break; + default: if (tcpm_port_is_disconnected(port)) tcpm_set_state(port, unattached_state(port), 0); @@ -3855,6 +3863,15 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port) case SRC_TRY_DEBOUNCE: /* Do nothing, waiting for sink detection */ break; + + case PORT_RESET: + case PORT_RESET_WAIT_OFF: + /* + * State set back to default mode once the timer completes. + * Ignore vbus changes here. + */ + break; + default: break; } @@ -3908,10 +3925,19 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) case PORT_RESET_WAIT_OFF: tcpm_set_state(port, tcpm_default_state(port), 0); break; + case SRC_TRY_WAIT: case SRC_TRY_DEBOUNCE: /* Do nothing, waiting for sink detection */ break; + + case PORT_RESET: + /* + * State set back to default mode once the timer completes. + * Ignore vbus changes here. + */ + break; + default: if (port->pwr_role == TYPEC_SINK && port->attached) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 85b32c325282..cc1d64765ce7 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -342,8 +342,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - if (is_invalid_reserved_pfn(*pfn)) + if (!follow_pfn(vma, vaddr, pfn) && + is_invalid_reserved_pfn(*pfn)) ret = 0; } done: @@ -555,7 +555,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, continue; } - remote_vaddr = dma->vaddr + iova - dma->iova; + remote_vaddr = dma->vaddr + (iova - dma->iova); ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], do_accounting); if (ret) @@ -2345,10 +2345,10 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu, vaddr = dma->vaddr + offset; if (write) - *copied = __copy_to_user((void __user *)vaddr, data, + *copied = copy_to_user((void __user *)vaddr, data, count) ? 0 : count; else - *copied = __copy_from_user(data, (void __user *)vaddr, + *copied = copy_from_user(data, (void __user *)vaddr, count) ? 
0 : count; if (kthread) unuse_mm(mm); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index e36aaf9ba7bd..fb4e944c4d0d 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -181,14 +181,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); - added = true; - - /* Deliver to monitoring devices all correctly transmitted - * packets. + /* Deliver to monitoring devices all packets that we + * will transmit. */ virtio_transport_deliver_tap_pkt(pkt); + vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); + added = true; + pkt->off += payload_len; total_len += payload_len; @@ -196,6 +196,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, * to send it with the next available buffer. */ if (pkt->off < pkt->len) { + /* We are queueing the same virtio_vsock_pkt to handle + * the remaining bytes, and we want to deliver it + * to monitoring devices in the next iteration. + */ + pkt->tap_delivered = false; + spin_lock_bh(&vsock->send_pkt_list_lock); list_add(&pkt->list, &vsock->send_pkt_list); spin_unlock_bh(&vsock->send_pkt_list_lock); @@ -543,6 +549,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) mutex_unlock(&vq->mutex); } + /* Some packets may have been queued before the device was started, + * let's kick the send worker to send them. + */ + vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); + mutex_unlock(&vsock->dev.mutex); return 0; |
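
The vhost-vsock start path above ends with a deliberate kick of send_pkt_work, because packets can be queued before the device is started and nothing would flush them afterwards. A sketch of that "drain the backlog once we are running" pattern on a hypothetical driver structure; my_dev, my_queue and my_start are not vhost API.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_dev {
	spinlock_t		lock;
	struct list_head	backlog;	/* items waiting to be processed */
	struct work_struct	work;		/* worker that drains the backlog */
	bool			running;
};

static void my_queue(struct my_dev *dev, struct list_head *item)
{
	bool kick;

	spin_lock_bh(&dev->lock);
	list_add_tail(item, &dev->backlog);
	kick = dev->running;		/* the worker only makes progress once started */
	spin_unlock_bh(&dev->lock);

	if (kick)
		schedule_work(&dev->work);
}

static void my_start(struct my_dev *dev)
{
	spin_lock_bh(&dev->lock);
	dev->running = true;
	spin_unlock_bh(&dev->lock);

	/*
	 * Items may already sit in the backlog from before the device was
	 * started; kick the worker once so they are not stranded.
	 */
	schedule_work(&dev->work);
}
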