author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-12-08 12:36:45 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-12-08 12:36:45 -0800
commit | d71369dbe0c5c1217dc681d6871b7918b2996de6 (patch) |
tree | 4686e56d034fc8969e89d6b5c86b3eace0eeec2d /drivers/md |
parent | 689659c988193f1e16bc34bfda3f333b11528c1f (diff) |
parent | c6d3ab9e76dc01011392cf8309f7e684b94ec464 (diff) |
Merge tag 'block-6.7-2023-12-08' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:
"Nothing major in here, just miscellanous fixes for MD and NVMe:
- NVMe pull request via Keith:
- Proper nvme ctrl state setting (Keith)
- Passthrough command optimization (Keith)
- Spectre fix (Nitesh)
- Kconfig clarifications (Shin'ichiro)
- Frozen state deadlock fix (Bitao)
- Power setting quirk (Georg)
- MD pull requests via Song:
- 6.7 regresisons with recovery/sync (Yu)
- Reshape fix (David)"
* tag 'block-6.7-2023-12-08' of git://git.kernel.dk/linux:
md: split MD_RECOVERY_NEEDED out of mddev_resume
nvme-pci: Add sleep quirk for Kingston drives
md: fix stopping sync thread
md: don't leave 'MD_RECOVERY_FROZEN' in error path of md_set_readonly()
md: fix missing flush of sync_work
nvme: fix deadlock between reset and scan
nvme: prevent potential spectre v1 gadget
nvme: improve NVME_HOST_AUTH and NVME_TARGET_AUTH config descriptions
nvme-ioctl: move capable() admin check to the end
nvme: ensure reset state check ordering
nvme: introduce helper function to get ctrl state
md/raid6: use valid sector values to determine if an I/O should wait on the reshape
Diffstat (limited to 'drivers/md')

-rw-r--r-- | drivers/md/md.c | 144
-rw-r--r-- | drivers/md/raid5.c | 4

2 files changed, 78 insertions, 70 deletions
```diff
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c94373d64f2c..b066abbffd10 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -490,7 +490,7 @@ int mddev_suspend(struct mddev *mddev, bool interruptible)
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
-void mddev_resume(struct mddev *mddev)
+static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
 {
         lockdep_assert_not_held(&mddev->reconfig_mutex);
 
@@ -507,12 +507,18 @@ void mddev_resume(struct mddev *mddev)
         percpu_ref_resurrect(&mddev->active_io);
         wake_up(&mddev->sb_wait);
 
-        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+        if (recovery_needed)
+                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
         md_wakeup_thread(mddev->thread);
         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
         mutex_unlock(&mddev->suspend_mutex);
 }
+
+void mddev_resume(struct mddev *mddev)
+{
+        return __mddev_resume(mddev, true);
+}
 EXPORT_SYMBOL_GPL(mddev_resume);
 
 /*
@@ -4840,25 +4846,29 @@ action_show(struct mddev *mddev, char *page)
         return sprintf(page, "%s\n", type);
 }
 
-static void stop_sync_thread(struct mddev *mddev)
+/**
+ * stop_sync_thread() - wait for sync_thread to stop if it's running.
+ * @mddev:     the array.
+ * @locked:    if set, reconfig_mutex will still be held after this function
+ *             return; if not set, reconfig_mutex will be released after this
+ *             function return.
+ * @check_seq: if set, only wait for curent running sync_thread to stop, noted
+ *             that new sync_thread can still start.
+ */
+static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
 {
-        if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-                return;
+        int sync_seq;
 
-        if (mddev_lock(mddev))
-                return;
+        if (check_seq)
+                sync_seq = atomic_read(&mddev->sync_seq);
 
-        /*
-         * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
-         * held.
-         */
         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-                mddev_unlock(mddev);
+                if (!locked)
+                        mddev_unlock(mddev);
                 return;
         }
 
-        if (work_pending(&mddev->del_work))
-                flush_workqueue(md_misc_wq);
+        mddev_unlock(mddev);
 
         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
         /*
@@ -4866,21 +4876,28 @@ static void stop_sync_thread(struct mddev *mddev)
          * never happen
          */
         md_wakeup_thread_directly(mddev->sync_thread);
+        if (work_pending(&mddev->sync_work))
+                flush_work(&mddev->sync_work);
 
-        mddev_unlock(mddev);
+        wait_event(resync_wait,
+                   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+                   (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+
+        if (locked)
+                mddev_lock_nointr(mddev);
 }
 
 static void idle_sync_thread(struct mddev *mddev)
 {
-        int sync_seq = atomic_read(&mddev->sync_seq);
-
         mutex_lock(&mddev->sync_mutex);
         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        stop_sync_thread(mddev);
 
-        wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
-                        !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+        if (mddev_lock(mddev)) {
+                mutex_unlock(&mddev->sync_mutex);
+                return;
+        }
 
+        stop_sync_thread(mddev, false, true);
         mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -4888,11 +4905,13 @@ static void frozen_sync_thread(struct mddev *mddev)
 {
         mutex_lock(&mddev->sync_mutex);
         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        stop_sync_thread(mddev);
 
-        wait_event(resync_wait, mddev->sync_thread == NULL &&
-                        !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+        if (mddev_lock(mddev)) {
+                mutex_unlock(&mddev->sync_mutex);
+                return;
+        }
 
+        stop_sync_thread(mddev, false, false);
         mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -6264,14 +6283,7 @@ static void md_clean(struct mddev *mddev)
 
 static void __md_stop_writes(struct mddev *mddev)
 {
-        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        if (work_pending(&mddev->del_work))
-                flush_workqueue(md_misc_wq);
-        if (mddev->sync_thread) {
-                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                md_reap_sync_thread(mddev);
-        }
-
+        stop_sync_thread(mddev, true, false);
         del_timer_sync(&mddev->safemode_timer);
 
         if (mddev->pers && mddev->pers->quiesce) {
@@ -6355,25 +6367,16 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
         int err = 0;
         int did_freeze = 0;
 
+        if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+                return -EBUSY;
+
         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
                 did_freeze = 1;
                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                 md_wakeup_thread(mddev->thread);
         }
-        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-        /*
-         * Thread might be blocked waiting for metadata update which will now
-         * never happen
-         */
-        md_wakeup_thread_directly(mddev->sync_thread);
-
-        if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
-                return -EBUSY;
-        mddev_unlock(mddev);
-        wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
-                                          &mddev->recovery));
+
+        stop_sync_thread(mddev, false, false);
         wait_event(mddev->sb_wait,
                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
         mddev_lock_nointr(mddev);
@@ -6383,29 +6386,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
             mddev->sync_thread ||
             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
                 pr_warn("md: %s still in use.\n",mdname(mddev));
-                if (did_freeze) {
-                        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                        md_wakeup_thread(mddev->thread);
-                }
                 err = -EBUSY;
                 goto out;
         }
+
         if (mddev->pers) {
                 __md_stop_writes(mddev);
 
-                err  = -ENXIO;
-                if (mddev->ro == MD_RDONLY)
+                if (mddev->ro == MD_RDONLY) {
+                        err = -ENXIO;
                         goto out;
+                }
+
                 mddev->ro = MD_RDONLY;
                 set_disk_ro(mddev->gendisk, 1);
+        }
+
+out:
+        if ((mddev->pers && !err) || did_freeze) {
                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                 md_wakeup_thread(mddev->thread);
                 sysfs_notify_dirent_safe(mddev->sysfs_state);
-                err = 0;
         }
-out:
+
         mutex_unlock(&mddev->open_mutex);
         return err;
 }
@@ -6426,20 +6430,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                 md_wakeup_thread(mddev->thread);
         }
-        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-
-        /*
-         * Thread might be blocked waiting for metadata update which will now
-         * never happen
-         */
-        md_wakeup_thread_directly(mddev->sync_thread);
-
-        mddev_unlock(mddev);
-        wait_event(resync_wait, (mddev->sync_thread == NULL &&
-                                 !test_bit(MD_RECOVERY_RUNNING,
-                                           &mddev->recovery)));
-        mddev_lock_nointr(mddev);
+
+        stop_sync_thread(mddev, true, false);
 
         mutex_lock(&mddev->open_mutex);
         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
@@ -9403,7 +9395,15 @@ static void md_start_sync(struct work_struct *ws)
                 goto not_running;
         }
 
-        suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+        mddev_unlock(mddev);
+        /*
+         * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+         * not set it again. Otherwise, we may cause issue like this one:
+         *      https://bugzilla.kernel.org/show_bug.cgi?id=218200
+         * Therefore, use __mddev_resume(mddev, false).
+         */
+        if (suspend)
+                __mddev_resume(mddev, false);
         md_wakeup_thread(mddev->sync_thread);
         sysfs_notify_dirent_safe(mddev->sysfs_action);
         md_new_event();
@@ -9415,7 +9415,15 @@ not_running:
         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-        suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+        mddev_unlock(mddev);
+        /*
+         * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+         * not set it again. Otherwise, we may cause issue like this one:
+         *      https://bugzilla.kernel.org/show_bug.cgi?id=218200
+         * Therefore, use __mddev_resume(mddev, false).
+         */
+        if (suspend)
+                __mddev_resume(mddev, false);
         wake_up(&resync_wait);
 
         if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc031d42f53b..26e1e8a5e941 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5892,11 +5892,11 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
         int dd_idx;
 
         for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
-                if (dd_idx == sh->pd_idx)
+                if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
                         continue;
 
                 min_sector = min(min_sector, sh->dev[dd_idx].sector);
-                max_sector = min(max_sector, sh->dev[dd_idx].sector);
+                max_sector = max(max_sector, sh->dev[dd_idx].sector);
         }
 
         spin_lock_irq(&conf->device_lock);
```
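The shape of the md fix is worth spelling out. The reworked stop_sync_thread() centralizes the interrupt-flush-wait sequence that md_set_readonly(), do_md_stop(), __md_stop_writes() and the sysfs handlers used to open-code: every caller now enters holding reconfig_mutex, the helper always drops it so the sync thread can make progress, `locked` says whether to retake the mutex before returning, and `check_seq` distinguishes "wait for the current resync pass to end" from "wait until no sync thread runs at all". Below is a minimal userspace sketch of that contract using pthreads; the names array_lock, sync_running and sync_seq, and the busy-wait loop, are invented stand-ins for reconfig_mutex, MD_RECOVERY_RUNNING, mddev->sync_seq and wait_event(), not kernel code.

```c
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace analogue of the reworked contract -- not the kernel code. */
static pthread_mutex_t array_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool sync_running;
static atomic_int sync_seq;

/* Caller always enters holding array_lock.
 * @locked:    if true, array_lock is held again on return;
 *             if false, it has been released.
 * @check_seq: if true, wait only for the current sync pass (seq bump);
 *             a new pass may already be starting when we return. */
static void stop_sync_thread(bool locked, bool check_seq)
{
        int seq = atomic_load(&sync_seq);

        if (!atomic_load(&sync_running)) {
                if (!locked)
                        pthread_mutex_unlock(&array_lock);
                return;
        }

        /* Drop the lock so the sync thread can finish; the kernel also
         * sets MD_RECOVERY_INTR and wakes the thread at this point. */
        pthread_mutex_unlock(&array_lock);

        while (atomic_load(&sync_running) &&
               !(check_seq && seq != atomic_load(&sync_seq)))
                sched_yield();  /* kernel: wait_event(resync_wait, ...) */

        if (locked)
                pthread_mutex_lock(&array_lock);
}

static void *sync_thread(void *arg)
{
        (void)arg;
        for (volatile long i = 0; i < 50000000; i++)
                ;                        /* pretend to resync */
        atomic_fetch_add(&sync_seq, 1);  /* this pass is done */
        atomic_store(&sync_running, false);
        return NULL;
}

int main(void)
{
        pthread_t t;

        atomic_store(&sync_running, true);
        pthread_create(&t, NULL, sync_thread, NULL);

        pthread_mutex_lock(&array_lock);   /* caller holds the "array" lock */
        stop_sync_thread(true, false);     /* like __md_stop_writes() */
        pthread_mutex_unlock(&array_lock); /* locked=true: still held here */

        pthread_join(t, NULL);
        printf("sync stopped, seq=%d\n", atomic_load(&sync_seq));
        return 0;
}
```

The sketch omits the MD_RECOVERY_INTR signalling and the sync_work flush the kernel performs between unlocking and waiting; it models only the locking and sequence-number contract, with idle_sync_thread() corresponding to check_seq=true and frozen_sync_thread() to check_seq=false.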
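The raid5.c hunk is a two-line fix with outsized effect: stripe_ahead_of_reshape() accumulated max_sector with min() instead of max(), so the computed upper bound never rose above its zero seed, and the loop also failed to skip RAID6's second parity slot (qd_idx) the way it skips pd_idx. Here is a standalone toy showing the corrected accumulation; the sector values and parity indices are made up for illustration:

```c
#include <limits.h>
#include <stdio.h>

static long min_l(long a, long b) { return a < b ? a : b; }
static long max_l(long a, long b) { return a > b ? a : b; }

int main(void)
{
        /* Made-up example: five stripe slots, slots 1 and 3 hold P/Q parity. */
        long sectors[] = { 1024, 2048, 512, 4096, 768 };
        int disks = 5, pd_idx = 1, qd_idx = 3;
        /* Seed like the kernel does: min from "infinity", max from zero. */
        long min_sector = LONG_MAX, max_sector = 0;

        for (int dd_idx = 0; dd_idx < disks; dd_idx++) {
                if (dd_idx == pd_idx || dd_idx == qd_idx)
                        continue; /* skip both parity devices on RAID6 */

                min_sector = min_l(min_sector, sectors[dd_idx]);
                /* the bug used min() here, pinning max_sector at 0 */
                max_sector = max_l(max_sector, sectors[dd_idx]);
        }

        /* prints "data sectors span [512, 1024]" for this example */
        printf("data sectors span [%ld, %ld]\n", min_sector, max_sector);
        return 0;
}
```

With min() on both accumulators, max_sector in this toy would stay at 0, which is exactly the kind of bogus range that let stripe_ahead_of_reshape() misjudge whether an I/O should wait on the reshape.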