author     Thomas Klein <tklein@de.ibm.com>    2008-10-27 10:38:46 +0100
committer  Jeff Garzik <jgarzik@redhat.com>    2008-10-27 14:49:55 -0400
commit     3fd09c45bfbcf77949ed6db36e67c1681424fedb (patch)
tree       73a5a3122c69c68489905c065e810abe9288e95c /drivers/net/ehea/ehea_qmr.c
parent     74d5e8acd95ae934194303138a43b60005fcfad6 (diff)
ehea: Detect 16GB hugepages for firmware restriction
All kernel memory which is used for kernel/hardware data transfer must be registered with firmware using "memory regions". 16GB hugepages may not be part of a memory region due to firmware restrictions. This patch modifies the walk_memory_resource callback function to filter out hugepages and add only standard memory to the busmap, which is later used for MR registration.

Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
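For orientation, here is a minimal user-space sketch of the chunk-splitting approach the commit message describes; it is not the driver code (which follows in the diff below) but an illustration under stated assumptions. The names is_hugepage(), register_region() and add_chunk(), as well as the size constants, are hypothetical stand-ins for ehea_is_hugepage(), ehea_update_busmap() and ehea_create_busmap_callback(), and a 64-bit unsigned long is assumed.

/*
 * Sketch: walk a memory chunk in section-sized steps, skip any 16GB
 * hugepage, and hand only the standard memory around it to the
 * registration helper.  All names and sizes are illustrative stand-ins,
 * not the ehea driver's own definitions.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE     4096UL
#define SKETCH_SECT_SIZE     (256UL * 1024 * 1024)          /* illustrative section size */
#define SKETCH_HUGEPAGE_SIZE (16UL * 1024 * 1024 * 1024)    /* 16GB hugepage */

/* Hypothetical predicate: does a 16GB hugepage start at this pfn? */
static int is_hugepage(unsigned long pfn)
{
	return pfn == 0x400000UL;	/* pretend one starts 16GB into the chunk */
}

/* Hypothetical stand-in for ehea_update_busmap(..., EHEA_BUSMAP_ADD_SECT) */
static int register_region(unsigned long start_pfn, unsigned long nr_pages)
{
	if (!nr_pages)
		return 0;
	printf("register pfn 0x%lx, %lu pages\n", start_pfn, nr_pages);
	return 0;
}

static int add_chunk(unsigned long initial_pfn, unsigned long total_nr_pages)
{
	unsigned long pfn = initial_pfn;
	unsigned long start_pfn = initial_pfn;
	unsigned long end_pfn = initial_pfn + total_nr_pages;
	int ret;

	/* A chunk smaller than one hugepage cannot contain one. */
	if (total_nr_pages * SKETCH_PAGE_SIZE < SKETCH_HUGEPAGE_SIZE)
		return register_region(initial_pfn, total_nr_pages);

	while (pfn < end_pfn) {
		if (is_hugepage(pfn)) {
			/* Register the standard memory in front of the hugepage. */
			ret = register_region(start_pfn, pfn - start_pfn);
			if (ret)
				return ret;
			/* Skip the hugepage itself. */
			pfn += SKETCH_HUGEPAGE_SIZE / SKETCH_PAGE_SIZE;
			start_pfn = pfn;
		} else {
			pfn += SKETCH_SECT_SIZE / SKETCH_PAGE_SIZE;
		}
	}

	/* Register whatever standard memory follows the hugepage(s). */
	return register_region(start_pfn, pfn - start_pfn);
}

int main(void)
{
	/* A 32GB chunk starting at pfn 0 with a 16GB hugepage in its middle. */
	add_chunk(0, (32UL * 1024 * 1024 * 1024) / SKETCH_PAGE_SIZE);
	return 0;
}

Stepping by section size rather than page size keeps the scan cheap; as the diff below shows, the driver checks hugepage alignment up front in ehea_is_hugepage() via EHEA_HUGEPAGE_PFN_MASK before looking at the compound page order.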
Diffstat (limited to 'drivers/net/ehea/ehea_qmr.c')
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 57
1 file changed, 52 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 9b61dc9865d1..9d006878f045 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -632,10 +632,13 @@ static void ehea_rebuild_busmap(void)
 	}
 }
 
-static int ehea_update_busmap(unsigned long pfn, unsigned long pgnum, int add)
+static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
 {
 	unsigned long i, start_section, end_section;
 
+	if (!nr_pages)
+		return 0;
+
 	if (!ehea_bmap) {
 		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
 		if (!ehea_bmap)
@@ -643,7 +646,7 @@ static int ehea_update_busmap(unsigned long pfn, unsigned long pgnum, int add)
 	}
 
 	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
-	end_section = start_section + ((pgnum * PAGE_SIZE) / EHEA_SECTSIZE);
+	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
 	/* Mark entries as valid or invalid only; address is assigned later */
 	for (i = start_section; i < end_section; i++) {
 		u64 flag;
@@ -692,10 +695,54 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
 	return ret;
 }
 
-static int ehea_create_busmap_callback(unsigned long pfn,
-				       unsigned long nr_pages, void *arg)
+static int ehea_is_hugepage(unsigned long pfn)
+{
+	int page_order;
+
+	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
+		return 0;
+
+	page_order = compound_order(pfn_to_page(pfn));
+	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+		return 0;
+
+	return 1;
+}
+
+static int ehea_create_busmap_callback(unsigned long initial_pfn,
+				       unsigned long total_nr_pages, void *arg)
 {
-	return ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+	int ret;
+	unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
+		return ehea_update_busmap(initial_pfn, total_nr_pages,
+					  EHEA_BUSMAP_ADD_SECT);
+
+	/* Given chunk is >= 16GB -> check for hugepages */
+	start_pfn = initial_pfn;
+	end_pfn = initial_pfn + total_nr_pages;
+	pfn = start_pfn;
+
+	while (pfn < end_pfn) {
+		if (ehea_is_hugepage(pfn)) {
+			/* Add mem found in front of the hugepage */
+			nr_pages = pfn - start_pfn;
+			ret = ehea_update_busmap(start_pfn, nr_pages,
+						 EHEA_BUSMAP_ADD_SECT);
+			if (ret)
+				return ret;
+
+			/* Skip the hugepage */
+			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
+			start_pfn = pfn;
+		} else
+			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
+	}
+
+	/* Add mem found behind the hugepage(s) */
+	nr_pages = pfn - start_pfn;
+	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
 }
 
 int ehea_create_busmap(void)