author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2016-03-08 11:55:04 +0100
committer Christian Borntraeger <borntraeger@de.ibm.com>  2016-06-20 09:53:59 +0200
commit    6ea427bbbd4078297bb1dbd6c5cb83f3f48aac46
tree      7ed574fcca3cae5d6a50211d43c3d5ab21620d3a
parent    b2d73b2a0ad1c758cb0c1acb01a911744b845942
s390/mm: add reference counter to gmap structure
Let's use a reference counter mechanism to control the lifetime of gmap structures. This will be needed for further changes related to gmap shadows.

Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
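The patch splits the old gmap_alloc()/gmap_free() pair into creation (gmap_create(), which takes the initial reference and links the gmap into the mm), pinning (gmap_get()/gmap_put()), and teardown (gmap_remove(), which unlinks the gmap and drops the initial reference; gmap_free() now only runs once the last gmap_put() brings the counter to zero). A minimal usage sketch of that lifecycle, not part of the patch itself; the caller function and its error handling are hypothetical:

	/* Sketch of the new gmap lifetime API introduced by this patch. */
	#include <linux/errno.h>
	#include <linux/mm_types.h>
	#include <asm/gmap.h>

	static int example_gmap_lifecycle(struct mm_struct *mm, unsigned long limit)
	{
		struct gmap *gmap;

		gmap = gmap_create(mm, limit);	/* ref_count starts at 1 */
		if (!gmap)
			return -ENOMEM;

		gmap_get(gmap);			/* a second user pins the gmap */
		/* ... both users operate on the guest address space ... */
		gmap_put(gmap);			/* the second user drops its reference */

		gmap_remove(gmap);		/* flush, unlink from the mm and drop the
						 * initial reference; gmap_free() runs once
						 * ref_count reaches zero */
		return 0;
	}

Presumably the point of keeping gmap_free() private and reachable only through gmap_put() is that the upcoming gmap-shadow code can hold its own reference, so a gmap may outlive gmap_remove().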
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/gmap.c | 90
1 file changed, 70 insertions(+), 20 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 480c076afceb..fe25f1915800 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -21,13 +21,13 @@
#include <asm/tlb.h>
/**
- * gmap_alloc - allocate a guest address space
+ * gmap_alloc - allocate and initialize a guest address space
* @mm: pointer to the parent mm_struct
* @limit: maximum address of the gmap address space
*
* Returns a guest address space structure.
*/
-struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
+static struct gmap *gmap_alloc(unsigned long limit)
{
struct gmap *gmap;
struct page *page;
@@ -58,7 +58,7 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
spin_lock_init(&gmap->guest_table_lock);
- gmap->mm = mm;
+ atomic_set(&gmap->ref_count, 1);
page = alloc_pages(GFP_KERNEL, 2);
if (!page)
goto out_free;
@@ -70,9 +70,6 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
gmap->asce = atype | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | __pa(table);
gmap->asce_end = limit;
- spin_lock(&mm->context.gmap_lock);
- list_add_rcu(&gmap->list, &mm->context.gmap_list);
- spin_unlock(&mm->context.gmap_lock);
return gmap;
out_free:
@@ -80,7 +77,28 @@ out_free:
out:
return NULL;
}
-EXPORT_SYMBOL_GPL(gmap_alloc);
+
+/**
+ * gmap_create - create a guest address space
+ * @mm: pointer to the parent mm_struct
+ * @limit: maximum size of the gmap address space
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
+{
+ struct gmap *gmap;
+
+ gmap = gmap_alloc(limit);
+ if (!gmap)
+ return NULL;
+ gmap->mm = mm;
+ spin_lock(&mm->context.gmap_lock);
+ list_add_rcu(&gmap->list, &mm->context.gmap_list);
+ spin_unlock(&mm->context.gmap_lock);
+ return gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_create);
static void gmap_flush_tlb(struct gmap *gmap)
{
@@ -118,21 +136,10 @@ static void gmap_radix_tree_free(struct radix_tree_root *root)
* gmap_free - free a guest address space
* @gmap: pointer to the guest address space structure
*/
-void gmap_free(struct gmap *gmap)
+static void gmap_free(struct gmap *gmap)
{
struct page *page, *next;
- /* Flush tlb. */
- if (MACHINE_HAS_IDTE)
- __tlb_flush_asce(gmap->mm, gmap->asce);
- else
- __tlb_flush_global();
-
- spin_lock(&gmap->mm->context.gmap_lock);
- list_del_rcu(&gmap->list);
- spin_unlock(&gmap->mm->context.gmap_lock);
- synchronize_rcu();
-
/* Free all segment & region tables. */
list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
__free_pages(page, 2);
@@ -140,7 +147,50 @@ void gmap_free(struct gmap *gmap)
gmap_radix_tree_free(&gmap->host_to_guest);
kfree(gmap);
}
-EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_get - increase reference counter for guest address space
+ * @gmap: pointer to the guest address space structure
+ *
+ * Returns the gmap pointer
+ */
+struct gmap *gmap_get(struct gmap *gmap)
+{
+ atomic_inc(&gmap->ref_count);
+ return gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_get);
+
+/**
+ * gmap_put - decrease reference counter for guest address space
+ * @gmap: pointer to the guest address space structure
+ *
+ * If the reference counter reaches zero the guest address space is freed.
+ */
+void gmap_put(struct gmap *gmap)
+{
+ if (atomic_dec_return(&gmap->ref_count) == 0)
+ gmap_free(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_put);
+
+/**
+ * gmap_remove - remove a guest address space but do not free it yet
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_remove(struct gmap *gmap)
+{
+ /* Flush tlb. */
+ gmap_flush_tlb(gmap);
+ /* Remove gmap from the per-mm list */
+ spin_lock(&gmap->mm->context.gmap_lock);
+ list_del_rcu(&gmap->list);
+ spin_unlock(&gmap->mm->context.gmap_lock);
+ synchronize_rcu();
+ /* Put reference */
+ gmap_put(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_remove);
/**
* gmap_enable - switch primary space to the guest address space