author     Marcelo Tosatti <mtosatti@redhat.com>   2009-06-04 15:08:22 -0300
committer  Avi Kivity <avi@redhat.com>             2009-09-10 08:32:49 +0300
commit     64a2268dcfc9c3626aa7f70902690e2fc10c1630
tree       51e37e86dc9a75f10708ff4df7614c63dfa7c079
parent     9f4cc12765ea48a40347449d6802a3322ced8709
KVM: move coalesced_mmio locking to its own device
Move coalesced_mmio locking to its own device, instead of relying on kvm->lock.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
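In outline: the device's in_range() check now runs without any lock, so it refuses to batch unless the ring has at least KVM_MAX_VCPUS free slots (room for every vcpu that may have raced past the check), and the ring update in write() is serialized by a new per-device spinlock initialized at device creation. Below is a minimal userspace sketch of that pattern, using pthreads as a stand-in for the kernel's spinlock_t; the names (demo_ring, demo_dev, RING_MAX, MAX_WRITERS) are hypothetical and the code is illustrative, not the kernel implementation.

/*
 * Minimal userspace sketch of the locking pattern this patch introduces,
 * using pthreads as a stand-in for the kernel's spinlock_t. All names
 * (demo_ring, demo_dev, RING_MAX, MAX_WRITERS) are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define RING_MAX    64   /* stand-in for KVM_COALESCED_MMIO_MAX */
#define MAX_WRITERS  4   /* stand-in for KVM_MAX_VCPUS          */

struct demo_ring {
	unsigned first;                /* oldest entry not yet consumed   */
	unsigned last;                 /* first free entry (producer end) */
	unsigned long data[RING_MAX];
};

struct demo_dev {
	struct demo_ring ring;
	pthread_spinlock_t lock;       /* per-device lock, like dev->lock */
};

/*
 * Lockless capacity check, in the spirit of coalesced_mmio_in_range():
 * since it runs without the lock, every concurrent writer that also
 * passed the check may still be about to insert an entry, so demand
 * MAX_WRITERS free slots rather than just one.
 */
static int demo_in_range(struct demo_dev *dev)
{
	unsigned avail = (dev->ring.first - dev->ring.last - 1) % RING_MAX;

	return avail >= MAX_WRITERS;
}

/* The ring update itself is serialized by the device's own lock,
 * as coalesced_mmio_write() does after this patch. */
static void demo_write(struct demo_dev *dev, unsigned long val)
{
	pthread_spin_lock(&dev->lock);
	dev->ring.data[dev->ring.last] = val;
	dev->ring.last = (dev->ring.last + 1) % RING_MAX;
	pthread_spin_unlock(&dev->lock);
}

int main(void)
{
	struct demo_dev dev;

	memset(&dev, 0, sizeof(dev));
	pthread_spin_init(&dev.lock, PTHREAD_PROCESS_PRIVATE);

	if (demo_in_range(&dev))
		demo_write(&dev, 0xabcd);

	printf("last = %u\n", dev.ring.last);   /* prints "last = 1" */
	pthread_spin_destroy(&dev.lock);
	return 0;
}

Build with something like `cc -pthread demo.c`. The sketch omits the smp_wmb() visible in the diff below: in the kernel the entry's data must be published before ring->last is advanced, so a consumer that observes the new index also observes the contents, since userspace reads the ring without taking dev->lock.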
Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/coalesced_mmio.c   10
-rw-r--r--   virt/kvm/coalesced_mmio.h    1
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 754906800999..397f41936698 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -31,10 +31,6 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
 	if (!is_write)
 		return 0;
-	/* kvm->lock is taken by the caller and must be not released before
-	 * dev.read/write
-	 */
-
 	/* Are we able to batch it ? */
 	/* last is the first free entry
@@ -43,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
 	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < 1) {
+	if (avail < KVM_MAX_VCPUS) {
 		/* full */
 		return 0;
 	}
@@ -70,7 +66,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
-	/* kvm->lock must be taken by caller before call to in_range()*/
+	spin_lock(&dev->lock);
 	/* copy data in first free entry of the ring */
@@ -79,6 +75,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	spin_unlock(&dev->lock);
 }
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
@@ -101,6 +98,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
+	spin_lock_init(&dev->lock);
 	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
 	dev->kvm = kvm;
 	kvm->coalesced_mmio_dev = dev;
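As a concrete illustration of the availability arithmetic changed above, here is a standalone calculation with hypothetical values; RING_MAX, MAX_VCPUS and the sample indices stand in for KVM_COALESCED_MMIO_MAX, KVM_MAX_VCPUS and real ring state, whose actual values differ.

/* Worked example of the ring-availability check; the constants and the
 * sample first/last values are hypothetical stand-ins. */
#include <stdio.h>

#define RING_MAX  64    /* stand-in for KVM_COALESCED_MMIO_MAX */
#define MAX_VCPUS 16    /* stand-in for KVM_MAX_VCPUS          */

int main(void)
{
	unsigned first = 10;   /* oldest entry userspace has not consumed */
	unsigned last  = 60;   /* next slot the producer would fill       */

	/*
	 * Entries 10..59 are in use (50 of them); one slot always stays
	 * unused so that "full" and "empty" can be told apart, leaving
	 * 64 - 50 - 1 = 13 free.  Unsigned wrap-around plus the modulo
	 * makes the same expression work when last has wrapped past first.
	 */
	unsigned avail = (first - last - 1) % RING_MAX;

	printf("avail = %u\n", avail);                    /* avail = 13 */

	/* Pre-patch, one free slot sufficed (avail < 1 meant full).
	 * Post-patch the lockless check needs room for every vcpu that
	 * may have raced past it, so 13 < 16 means: do not batch. */
	printf("can batch = %d\n", avail >= MAX_VCPUS);   /* prints 0 */
	return 0;
}

A failed check simply means the write is not claimed by the coalesced device and is handled as an ordinary MMIO exit instead.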
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 5ac0ec628461..4b49f27fa31e 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -12,6 +12,7 @@
 struct kvm_coalesced_mmio_dev {
 	struct kvm_io_device dev;
 	struct kvm *kvm;
+	spinlock_t lock;
 	int nb_zones;
 	struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
 };