author		Jason Gunthorpe <jgg@nvidia.com>	2023-10-31 10:54:48 -0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2023-10-31 10:54:48 -0300
commit		162e3480246ef69386d4647d2320d86741bf08a2
tree		caada42ce40114e7abe5e74acb54781ad4aa424a	/drivers/infiniband/hw/mlx5/mr.c
parent		d4b2d165714c0ce8777d5131f6e0aad617b7adc4
parent		ffc253263a1375a65fa6c9f62a893e9767fbebfa
Merge tag 'v6.6' into rdma.git for-next
Resolve conflict by taking the spin_lock hunk from for-next:
https://lore.kernel.org/r/20230928113851.5197a1ec@canb.auug.org.au
Required for the next patch.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b0fa2d644973..1d6c54a53df6 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -239,7 +239,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
 
 static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
 {
-	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
+	set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
+				      ent->dev->umrc.pd);
 	MLX5_SET(mkc, mkc, free, 1);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
@@ -998,19 +999,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 	if (!dev->cache.wq)
 		return;
 
-	cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
 	mutex_lock(&dev->cache.rb_lock);
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		spin_lock_irq(&ent->mkeys_queue.lock);
 		ent->disabled = true;
 		spin_unlock_irq(&ent->mkeys_queue.lock);
-		cancel_delayed_work_sync(&ent->dwork);
 	}
+	mutex_unlock(&dev->cache.rb_lock);
+
+	/*
+	 * After all entries are disabled and will not reschedule on WQ,
+	 * flush it and all async commands.
+	 */
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
+	/* At this point all entries are disabled and have no concurrent work. */
+	mutex_lock(&dev->cache.rb_lock);
 	node = rb_first(root);
 	while (node) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
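
The interesting part of the resolved hunk is the teardown ordering: rather than calling cancel_delayed_work_sync() per entry while holding rb_lock, the merged code marks every entry disabled under its own lock, drops rb_lock, and drains all outstanding work with a single flush_workqueue() before the entries are freed. Below is a minimal userspace sketch of that disable-then-flush-then-free ordering, assuming pthreads; struct cache_ent, cache_flush_work(), and the other names here are hypothetical stand-ins for illustration, not the mlx5 structures.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct cache_ent {
		pthread_mutex_t lock;		/* models ent->mkeys_queue.lock */
		bool disabled;			/* handlers must not requeue once set */
		struct cache_ent *next;		/* stand-in for the rb-tree linkage */
	};

	struct cache {
		pthread_mutex_t list_lock;	/* models dev->cache.rb_lock */
		struct cache_ent *head;
	};

	/* Stand-in for flush_workqueue(): the real call waits for every queued
	 * work item to finish.  Because all entries were disabled first, no
	 * handler can requeue itself, so one flush drains everything. */
	static void cache_flush_work(struct cache *c)
	{
		(void)c;
	}

	static void cache_cleanup(struct cache *c)
	{
		struct cache_ent *ent, *next;

		/* Pass 1: disable every entry under its own lock, then drop
		 * the list lock so still-running handlers can take it. */
		pthread_mutex_lock(&c->list_lock);
		for (ent = c->head; ent; ent = ent->next) {
			pthread_mutex_lock(&ent->lock);
			ent->disabled = true;
			pthread_mutex_unlock(&ent->lock);
		}
		pthread_mutex_unlock(&c->list_lock);

		/* With no entry able to reschedule, the flush terminates. */
		cache_flush_work(c);

		/* Pass 2: no concurrent work remains, so freeing is safe. */
		pthread_mutex_lock(&c->list_lock);
		for (ent = c->head; ent; ent = next) {
			next = ent->next;
			pthread_mutex_destroy(&ent->lock);
			free(ent);
		}
		c->head = NULL;
		pthread_mutex_unlock(&c->list_lock);
	}

	int main(void)
	{
		struct cache c = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
		struct cache_ent *ent = calloc(1, sizeof(*ent));

		if (!ent)
			return 1;
		pthread_mutex_init(&ent->lock, NULL);
		ent->next = c.head;
		c.head = ent;

		cache_cleanup(&c);	/* disable -> flush -> free, in that order */
		return 0;
	}

The property the sketch shares with the merged code is that once disabled is set under the entry lock, no work handler can requeue itself, so the flush is guaranteed to terminate and the second tree walk runs with no concurrent work against the entries it frees.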