Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c | 1046
1 file changed, 566 insertions(+), 480 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0239fbff7bf7..9d3852c521c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -92,14 +92,14 @@
  *
  */
 
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma);
+
 const struct i915_ggtt_view i915_ggtt_view_normal;
 const struct i915_ggtt_view i915_ggtt_view_rotated = {
 	.type = I915_GGTT_VIEW_ROTATED
 };
 
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
-
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
 	bool has_aliasing_ppgtt;
@@ -146,14 +146,33 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	return has_aliasing_ppgtt ? 1 : 0;
 }
 
-static void ppgtt_bind_vma(struct i915_vma *vma,
-			   enum i915_cache_level cache_level,
-			   u32 flags);
-static void ppgtt_unbind_vma(struct i915_vma *vma);
+static int ppgtt_bind_vma(struct i915_vma *vma,
+			  enum i915_cache_level cache_level,
+			  u32 unused)
+{
+	u32 pte_flags = 0;
+
+	/* Currently applicable only to VLV */
+	if (vma->obj->gt_ro)
+		pte_flags |= PTE_READ_ONLY;
 
-static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-					 enum i915_cache_level level,
-					 bool valid)
+	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+				cache_level, pte_flags);
+
+	return 0;
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+	vma->vm->clear_range(vma->vm,
+			     vma->node.start,
+			     vma->obj->base.size,
+			     true);
+}
+
+static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+				  enum i915_cache_level level,
+				  bool valid)
 {
 	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
 	pte |= addr;
@@ -173,9 +192,9 @@ static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
-					 dma_addr_t addr,
-					 enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
+				  dma_addr_t addr,
+				  enum i915_cache_level level)
 {
 	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
 	pde |= addr;
@@ -285,8 +304,8 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 #define i915_dma_unmap_single(px, dev) \
 	__i915_dma_unmap_single((px)->daddr, dev)
 
-static inline void __i915_dma_unmap_single(dma_addr_t daddr,
-					   struct drm_device *dev)
+static void __i915_dma_unmap_single(dma_addr_t daddr,
+				    struct drm_device *dev)
 {
 	struct device *device = &dev->pdev->dev;
 
@@ -307,9 +326,9 @@ static inline void __i915_dma_unmap_single(dma_addr_t daddr,
 #define i915_dma_map_single(px, dev) \
 	i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
 
-static inline int i915_dma_map_page_single(struct page *page,
-					   struct drm_device *dev,
-					   dma_addr_t *daddr)
+static int i915_dma_map_page_single(struct page *page,
+				    struct drm_device *dev,
+				    dma_addr_t *daddr)
 {
 	struct device *device = &dev->pdev->dev;
 
@@ -320,7 +339,7 @@ static inline int i915_dma_map_page_single(struct page *page,
 	return 0;
 }
 
-static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+static void unmap_and_free_pt(struct i915_page_table *pt,
 			      struct drm_device *dev)
 {
 	if (WARN_ON(!pt->page))
@@ -332,9 +351,27 @@ static void unmap_and_free_pt(struct i915_page_table_entry *pt,
 	kfree(pt);
 }
 
-static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+static void gen8_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen8_pte_t *pt_vaddr, scratch_pte;
+	int i;
+
+	pt_vaddr = kmap_atomic(pt->page);
+	scratch_pte = gen8_pte_encode(vm->scratch.addr,
+				      I915_CACHE_LLC, true);
+
+	for (i = 0; i < GEN8_PTES; i++)
+		pt_vaddr[i] = scratch_pte;
+
+	if (!HAS_LLC(vm->dev))
+		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+	kunmap_atomic(pt_vaddr);
+}
+
+static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
 {
-	struct i915_page_table_entry *pt;
+	struct i915_page_table *pt;
 	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
 			GEN8_PTES : GEN6_PTES;
 	int ret = -ENOMEM;
@@ -369,78 +406,55 @@ fail_bitmap:
 	return ERR_PTR(ret);
 }
 
-/**
- * alloc_pt_range() - Allocate a multiple page tables
- * @pd:		The page directory which will have at least @count entries
- *		available to point to the allocated page tables.
- * @pde:	First page directory entry for which we are allocating.
- * @count:	Number of pages to allocate.
- * @dev:	DRM device.
- *
- * Allocates multiple page table pages and sets the appropriate entries in the
- * page table structure within the page directory. Function cleans up after
- * itself on any failures.
- *
- * Return: 0 if allocation succeeded.
- */
-static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
-			  struct drm_device *dev)
-{
-	int i, ret;
-
-	/* 512 is the max page tables per page_directory on any platform. */
-	if (WARN_ON(pde + count > I915_PDES))
-		return -EINVAL;
-
-	for (i = pde; i < pde + count; i++) {
-		struct i915_page_table_entry *pt = alloc_pt_single(dev);
-
-		if (IS_ERR(pt)) {
-			ret = PTR_ERR(pt);
-			goto err_out;
-		}
-		WARN(pd->page_table[i],
-		     "Leaking page directory entry %d (%p)\n",
-		     i, pd->page_table[i]);
-		pd->page_table[i] = pt;
-	}
-
-	return 0;
-
-err_out:
-	while (i-- > pde)
-		unmap_and_free_pt(pd->page_table[i], dev);
-	return ret;
-}
-
-static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+static void unmap_and_free_pd(struct i915_page_directory *pd,
+			      struct drm_device *dev)
 {
 	if (pd->page) {
+		i915_dma_unmap_single(pd, dev);
 		__free_page(pd->page);
+		kfree(pd->used_pdes);
 		kfree(pd);
 	}
 }
 
-static struct i915_page_directory_entry *alloc_pd_single(void)
+static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
 {
-	struct i915_page_directory_entry *pd;
+	struct i915_page_directory *pd;
+	int ret = -ENOMEM;
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!pd->page) {
-		kfree(pd);
-		return ERR_PTR(-ENOMEM);
-	}
+	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
+				sizeof(*pd->used_pdes), GFP_KERNEL);
+	if (!pd->used_pdes)
+		goto free_pd;
+
+	pd->page = alloc_page(GFP_KERNEL);
+	if (!pd->page)
+		goto free_bitmap;
+
+	ret = i915_dma_map_single(pd, dev);
+	if (ret)
+		goto free_page;
 
 	return pd;
+
+free_page:
+	__free_page(pd->page);
+free_bitmap:
+	kfree(pd->used_pdes);
+free_pd:
+	kfree(pd);
+
+	return ERR_PTR(ret);
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
-			  uint64_t val)
+static int gen8_write_pdp(struct intel_engine_cs *ring,
+			  unsigned entry,
+			  dma_addr_t addr)
 {
 	int ret;
 
@@ -452,10 +466,10 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
-	intel_ring_emit(ring, (u32)(val >> 32));
+	intel_ring_emit(ring, upper_32_bits(addr));
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
-	intel_ring_emit(ring, (u32)(val));
+	intel_ring_emit(ring, lower_32_bits(addr));
 	intel_ring_advance(ring);
 
 	return 0;
@@ -466,12 +480,12 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 {
 	int i, ret;
 
-	/* bit of a hack to find the actual last used pd */
-	int used_pd = ppgtt->num_pd_entries / I915_PDES;
-
-	for (i = used_pd - 1; i >= 0; i--) {
-		dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
-		ret = gen8_write_pdp(ring, i, addr);
+	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+		struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
+		dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
+		/* The page directory might be NULL, but we need to clear out
+		 * whatever the previous context might have used. */
+		ret = gen8_write_pdp(ring, i, pd_daddr);
 		if (ret)
 			return ret;
 	}
@@ -497,8 +511,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
-		struct i915_page_directory_entry *pd;
-		struct i915_page_table_entry *pt;
+		struct i915_page_directory *pd;
+		struct i915_page_table *pt;
 		struct page *page_table;
 
 		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
@@ -559,8 +573,8 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 			break;
 
 		if (pt_vaddr == NULL) {
-			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
-			struct i915_page_table_entry *pt = pd->page_table[pde];
+			struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
+			struct i915_page_table *pt = pd->page_table[pde];
 			struct page *page_table = pt->page;
 
 			pt_vaddr = kmap_atomic(page_table);
@@ -588,14 +602,43 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	}
 }
 
-static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
+static void __gen8_do_map_pt(gen8_pde_t * const pde,
+			     struct i915_page_table *pt,
+			     struct drm_device *dev)
+{
+	gen8_pde_t entry =
+		gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
+	*pde = entry;
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+			       struct i915_page_directory *pd)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_pde_t *page_directory;
+	struct i915_page_table *pt;
+	int i;
+
+	page_directory = kmap_atomic(pd->page);
+	pt = ppgtt->scratch_pt;
+	for (i = 0; i < I915_PDES; i++)
+		/* Map the PDE to the page table */
+		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
+
+	if (!HAS_LLC(vm->dev))
+		drm_clflush_virt_range(page_directory, PAGE_SIZE);
+	kunmap_atomic(page_directory);
+}
+
+static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
 {
 	int i;
 
 	if (!pd->page)
 		return;
 
-	for (i = 0; i < I915_PDES; i++) {
+	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
 		if (WARN_ON(!pd->page_table[i]))
 			continue;
 
@@ -604,163 +647,296 @@ static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct d
 	}
 }
 
-static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
 	int i;
 
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
+	for_each_set_bit(i, ppgtt->pdp.used_pdpes, GEN8_LEGACY_PDPES) {
 		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
 			continue;
 
 		gen8_free_page_tables(ppgtt->pdp.page_directory[i],
 				      ppgtt->base.dev);
-		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+		unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
 	}
+
+	unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
+	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
 }
 
-static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+/**
+ * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
+ * @ppgtt:	Master ppgtt structure.
+ * @pd:		Page directory for this address range.
+ * @start:	Starting virtual address to begin allocations.
+ * @length:	Size of the allocations.
+ * @new_pts:	Bitmap set by function with new allocations. Likely used by the
+ *		caller to free on error.
+ *
+ * Allocate the required number of page tables. Extremely similar to
+ * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
+ * the page directory boundary (instead of the page directory pointer). That
+ * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
+ * possible, and likely, that the caller will need to use multiple calls of this
+ * function to achieve the appropriate allocation.
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+				     struct i915_page_directory *pd,
+				     uint64_t start,
+				     uint64_t length,
+				     unsigned long *new_pts)
 {
-	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
-	int i, j;
-
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		/* TODO: In the future we'll support sparse mappings, so this
-		 * will have to change. */
-		if (!ppgtt->pdp.page_directory[i]->daddr)
+	struct drm_device *dev = ppgtt->base.dev;
+	struct i915_page_table *pt;
+	uint64_t temp;
+	uint32_t pde;
+
+	gen8_for_each_pde(pt, pd, start, length, temp, pde) {
+		/* Don't reallocate page tables */
+		if (pt) {
+			/* Scratch is never allocated this way */
+			WARN_ON(pt == ppgtt->scratch_pt);
 			continue;
+		}
 
-		pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
+		pt = alloc_pt_single(dev);
+		if (IS_ERR(pt))
+			goto unwind_out;
 
-		for (j = 0; j < I915_PDES; j++) {
-			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
-			struct i915_page_table_entry *pt;
-			dma_addr_t addr;
+		gen8_initialize_pt(&ppgtt->base, pt);
+		pd->page_table[pde] = pt;
+		set_bit(pde, new_pts);
+	}
 
-			if (WARN_ON(!pd->page_table[j]))
-				continue;
+	return 0;
 
-			pt = pd->page_table[j];
-			addr = pt->daddr;
+unwind_out:
+	for_each_set_bit(pde, new_pts, I915_PDES)
+		unmap_and_free_pt(pd->page_table[pde], dev);
 
-			if (addr)
-				pci_unmap_page(hwdev, addr, PAGE_SIZE,
-					       PCI_DMA_BIDIRECTIONAL);
-		}
-	}
+	return -ENOMEM;
 }
 
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+/**
+ * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
+ * @ppgtt:	Master ppgtt structure.
+ * @pdp:	Page directory pointer for this address range.
+ * @start:	Starting virtual address to begin allocations.
+ * @length:	Size of the allocations.
+ * @new_pds:	Bitmap set by function with new allocations. Likely used by the
+ *		caller to free on error.
+ *
+ * Allocate the required number of page directories starting at the pdpe index of
+ * @start, and ending at the pdpe index of @start + @length. This function will
+ * skip over already allocated page directories within the range, and only
+ * allocate new ones, setting the appropriate pointer within the pdp as well as
+ * the correct position in the bitmap @new_pds.
+ *
+ * The function will only allocate the pages within the range for a given page
+ * directory pointer. In other words, if @start + @length straddles a virtually
+ * addressed PDP boundary (512GB for 4k pages), there will be more allocations
+ * required by the caller. This is not currently possible, and the BUG in the
+ * code will prevent it.
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
+				     struct i915_page_directory_pointer *pdp,
+				     uint64_t start,
+				     uint64_t length,
+				     unsigned long *new_pds)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct drm_device *dev = ppgtt->base.dev;
+	struct i915_page_directory *pd;
+	uint64_t temp;
+	uint32_t pdpe;
 
-	gen8_ppgtt_unmap_pages(ppgtt);
-	gen8_ppgtt_free(ppgtt);
-}
+	WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
 
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
-	int i, ret;
+	/* FIXME: PPGTT container_of won't work for 64b */
+	WARN_ON((start + length) > 0x800000000ULL);
 
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
-				     0, I915_PDES, ppgtt->base.dev);
-		if (ret)
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+		if (pd)
+			continue;
+
+		pd = alloc_pd_single(dev);
+		if (IS_ERR(pd))
 			goto unwind_out;
+
+		gen8_initialize_pd(&ppgtt->base, pd);
+		pdp->page_directory[pdpe] = pd;
+		set_bit(pdpe, new_pds);
 	}
 
 	return 0;
 
 unwind_out:
-	while (i--)
-		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+	for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
+		unmap_and_free_pd(pdp->page_directory[pdpe], dev);
 
 	return -ENOMEM;
 }
 
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
-						const int max_pdp)
+static void
+free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts)
 {
 	int i;
 
-	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.page_directory[i] = alloc_pd_single();
-		if (IS_ERR(ppgtt->pdp.page_directory[i]))
-			goto unwind_out;
+	for (i = 0; i < GEN8_LEGACY_PDPES; i++)
+		kfree(new_pts[i]);
+	kfree(new_pts);
+	kfree(new_pds);
+}
+
+/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
+ * of these are based on the number of PDPEs in the system.
+ */
+static
+int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
+					 unsigned long ***new_pts)
+{
+	int i;
+	unsigned long *pds;
+	unsigned long **pts;
+
+	pds = kcalloc(BITS_TO_LONGS(GEN8_LEGACY_PDPES), sizeof(unsigned long), GFP_KERNEL);
+	if (!pds)
+		return -ENOMEM;
+
+	pts = kcalloc(GEN8_LEGACY_PDPES, sizeof(unsigned long *), GFP_KERNEL);
+	if (!pts) {
+		kfree(pds);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+		pts[i] = kcalloc(BITS_TO_LONGS(I915_PDES),
+				 sizeof(unsigned long), GFP_KERNEL);
+		if (!pts[i])
+			goto err_out;
 	}
 
-	ppgtt->num_pd_pages = max_pdp;
-	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
+	*new_pds = pds;
+	*new_pts = pts;
 
 	return 0;
 
-unwind_out:
-	while (i--)
-		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
-
+err_out:
+	free_gen8_temp_bitmaps(pds, pts);
 	return -ENOMEM;
 }
 
-static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
-			    const int max_pdp)
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start,
+			       uint64_t length)
 {
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	unsigned long *new_page_dirs, **new_page_tables;
+	struct i915_page_directory *pd;
+	const uint64_t orig_start = start;
+	const uint64_t orig_length = length;
+	uint64_t temp;
+	uint32_t pdpe;
 	int ret;
 
-	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+#ifndef CONFIG_64BIT
+	/* Disallow 64b address on 32b platforms. Nothing is wrong with doing
+	 * this in hardware, but a lot of the drm code is not prepared to handle
+	 * 64b offset on 32b platforms.
+	 * This will be addressed when 48b PPGTT is added */
+	if (start + length > 0x100000000ULL)
+		return -E2BIG;
+#endif
+
+	/* Wrap is never okay since we can only represent 48b, and we don't
+	 * actually use the other side of the canonical address space.
+	 */
+	if (WARN_ON(start + length < start))
+		return -ERANGE;
+
+	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
 	if (ret)
 		return ret;
 
-	ret = gen8_ppgtt_allocate_page_tables(ppgtt);
-	if (ret)
-		goto err_out;
+	/* Do the allocations first so we can easily bail out */
+	ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
+						new_page_dirs);
+	if (ret) {
+		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+		return ret;
+	}
+
+	/* For every page directory referenced, allocate page tables */
+	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+		ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
+						new_page_tables[pdpe]);
+		if (ret)
+			goto err_out;
+	}
 
-	ppgtt->num_pd_entries = max_pdp * I915_PDES;
+	start = orig_start;
+	length = orig_length;
 
-	return 0;
+	/* Allocations have completed successfully, so set the bitmaps, and do
+	 * the mappings. */
+	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+		gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+		struct i915_page_table *pt;
+		uint64_t pd_len = gen8_clamp_pd(start, length);
+		uint64_t pd_start = start;
+		uint32_t pde;
 
-err_out:
-	gen8_ppgtt_free(ppgtt);
-	return ret;
-}
+		/* Every pd should be allocated, we just did that above. */
+		WARN_ON(!pd);
 
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
-					     const int pd)
-{
-	dma_addr_t pd_addr;
-	int ret;
+		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+			/* Same reasoning as pd */
+			WARN_ON(!pt);
+			WARN_ON(!pd_len);
+			WARN_ON(!gen8_pte_count(pd_start, pd_len));
 
-	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
-			       ppgtt->pdp.page_directory[pd]->page, 0,
-			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			/* Set our used ptes within the page table */
+			bitmap_set(pt->used_ptes,
+				   gen8_pte_index(pd_start),
+				   gen8_pte_count(pd_start, pd_len));
 
-	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
-	if (ret)
-		return ret;
+			/* Our pde is now pointing to the pagetable, pt */
+			set_bit(pde, pd->used_pdes);
 
-	ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
+			/* Map the PDE to the page table */
+			__gen8_do_map_pt(page_directory + pde, pt, vm->dev);
 
-	return 0;
-}
+			/* NB: We haven't yet mapped ptes to pages. At this
+			 * point we're still relying on insert_entries() */
+		}
 
-static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
-					const int pd,
-					const int pt)
-{
-	dma_addr_t pt_addr;
-	struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
-	struct i915_page_table_entry *ptab = pdir->page_table[pt];
-	struct page *p = ptab->page;
-	int ret;
+		if (!HAS_LLC(vm->dev))
+			drm_clflush_virt_range(page_directory, PAGE_SIZE);
 
-	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
-			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
-	if (ret)
-		return ret;
+		kunmap_atomic(page_directory);
 
-	ptab->daddr = pt_addr;
+		set_bit(pdpe, ppgtt->pdp.used_pdpes);
+	}
 
+	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
 	return 0;
+
+err_out:
+	while (pdpe--) {
+		for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
+			unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+	}
+
+	for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
+		unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+
+	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	return ret;
 }
 
 /*
@@ -769,115 +945,50 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
  * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
  * space.
  *
- * FIXME: split allocation into smaller pieces. For now we only ever do this
- * once, but with full PPGTT, the multiple contiguous allocations will be bad.
- * TODO: Do something with the size parameter
  */
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
-	const int min_pt_pages = I915_PDES * max_pdp;
-	int i, j, ret;
-
-	if (size % (1<<30))
-		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
-
-	/* 1. Do all our allocations for page directories and page tables.
-	 * We allocate more than was asked so that we can point the unused parts
-	 * to valid entries that point to scratch page. Dynamic page tables
-	 * will fix this eventually.
-	 */
-	ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
-	if (ret)
-		return ret;
-
-	/*
-	 * 2. Create DMA mappings for the page directories and page tables.
-	 */
-	for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
-		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
-		if (ret)
-			goto bail;
+	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
+	if (IS_ERR(ppgtt->scratch_pt))
+		return PTR_ERR(ppgtt->scratch_pt);
 
-		for (j = 0; j < I915_PDES; j++) {
-			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
-			if (ret)
-				goto bail;
-		}
-	}
+	ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
+	if (IS_ERR(ppgtt->scratch_pd))
+		return PTR_ERR(ppgtt->scratch_pd);
 
-	/*
-	 * 3. Map all the page directory entires to point to the page tables
-	 * we've allocated.
-	 *
-	 * For now, the PPGTT helper functions all require that the PDEs are
-	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
-	 * will never need to touch the PDEs again.
-	 */
+	gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+	gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
 
-	for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
-		struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
-		gen8_pde_t *pd_vaddr;
-		pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
-		for (j = 0; j < I915_PDES; j++) {
-			struct i915_page_table_entry *pt = pd->page_table[j];
-			dma_addr_t addr = pt->daddr;
-			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
-						      I915_CACHE_LLC);
-		}
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
-		kunmap_atomic(pd_vaddr);
-	}
-
-	ppgtt->switch_mm = gen8_mm_switch;
-	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
-	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
-	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 	ppgtt->base.start = 0;
+	ppgtt->base.total = 1ULL << 32;
+	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
+	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->base.bind_vma = ppgtt_bind_vma;
 
-	/* This is the area that we advertise as usable for the caller */
-	ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
-
-	/* Set all ptes to a valid scratch page. Also above requested space */
-	ppgtt->base.clear_range(&ppgtt->base, 0,
-				ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
-				true);
+	ppgtt->switch_mm = gen8_mm_switch;
 
-	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
-			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
-	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
-			 ppgtt->num_pd_entries,
-			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
 	return 0;
-
-bail:
-	gen8_ppgtt_unmap_pages(ppgtt);
-	gen8_ppgtt_free(ppgtt);
-	return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
 	struct i915_address_space *vm = &ppgtt->base;
-	gen6_pte_t __iomem *pd_addr;
+	struct i915_page_table *unused;
 	gen6_pte_t scratch_pte;
 	uint32_t pd_entry;
-	int pte, pde;
+	uint32_t pte, pde, temp;
+	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
 	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
-	pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
-
-	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
-		   ppgtt->pd.pd_offset,
-		   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
-	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
 		u32 expected;
 		gen6_pte_t *pt_vaddr;
 		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
-		pd_entry = readl(pd_addr + pde);
+		pd_entry = readl(ppgtt->pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
 		if (pd_entry != expected)
@@ -914,8 +1025,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 }
 
 /* Write pde (index) from the page directory @pd to the page table @pt */
-static void gen6_write_pde(struct i915_page_directory_entry *pd,
-			    const int pde, struct i915_page_table_entry *pt)
+static void gen6_write_pde(struct i915_page_directory *pd,
+			    const int pde, struct i915_page_table *pt)
 {
 	/* Caller needs to make sure the write completes if necessary */
 	struct i915_hw_ppgtt *ppgtt =
@@ -931,10 +1042,10 @@ static void gen6_write_pde(struct i915_page_directory_entry *pd,
 /* Write all the page tables found in the ppgtt structure to incrementing page
  * directories. */
 static void gen6_write_page_range(struct drm_i915_private *dev_priv,
-				  struct i915_page_directory_entry *pd,
+				  struct i915_page_directory *pd,
 				  uint32_t start, uint32_t length)
 {
-	struct i915_page_table_entry *pt;
+	struct i915_page_table *pt;
 	uint32_t pde, temp;
 
 	gen6_for_each_pde(pt, pd, start, length, temp, pde)
@@ -1162,14 +1273,14 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
  * are switching between contexts with the same LRCA, we also must do a force
  * restore.
  */
-static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 {
 	/* If current vm != vm, */
 	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
 }
 
 static void gen6_initialize_pt(struct i915_address_space *vm,
-		struct i915_page_table_entry *pt)
+		struct i915_page_table *pt)
 {
 	gen6_pte_t *pt_vaddr, scratch_pte;
 	int i;
@@ -1195,7 +1306,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt =
 				container_of(vm, struct i915_hw_ppgtt, base);
-	struct i915_page_table_entry *pt;
+	struct i915_page_table *pt;
 	const uint32_t start_save = start, length_save = length;
 	uint32_t pde, temp;
 	int ret;
@@ -1263,7 +1374,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 
 unwind_out:
 	for_each_set_bit(pde, new_page_tables, I915_PDES) {
-		struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
+		struct i915_page_table *pt = ppgtt->pd.page_table[pde];
 
 		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
 		unmap_and_free_pt(pt, vm->dev);
@@ -1273,29 +1384,23 @@ unwind_out:
 	return ret;
 }
 
-static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
-{
-	int i;
-
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
-
-		if (pt != ppgtt->scratch_pt)
-			unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
-	}
-
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
-	unmap_and_free_pd(&ppgtt->pd);
-}
-
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_table *pt;
+	uint32_t pde;
+
 	drm_mm_remove_node(&ppgtt->node);
-	gen6_ppgtt_free(ppgtt);
+
+	gen6_for_all_pdes(pt, ppgtt, pde) {
+		if (pt != ppgtt->scratch_pt)
+			unmap_and_free_pt(pt, ppgtt->base.dev);
+	}
+
+	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+	unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
 }
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
@@ -1342,7 +1447,6 @@ alloc:
 	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
 		DRM_DEBUG("Forced to use aperture for PDEs\n");
 
-	ppgtt->num_pd_entries = I915_PDES;
 	return 0;
 
 err_out:
@@ -1358,14 +1462,14 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
 				  uint64_t start, uint64_t length)
 {
-	struct i915_page_table_entry *unused;
+	struct i915_page_table *unused;
 	uint32_t pde, temp;
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
 		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
 }
 
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1388,23 +1492,14 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
 	if (ret)
 		return ret;
 
-	if (aliasing) {
-		/* preallocate all pts */
-		ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
-				     ppgtt->base.dev);
-
-		if (ret) {
-			gen6_ppgtt_cleanup(&ppgtt->base);
-			return ret;
-		}
-	}
-
 	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
 	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->base.bind_vma = ppgtt_bind_vma;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.start = 0;
-	ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
+	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
 	ppgtt->pd.pd_offset =
@@ -1413,10 +1508,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
 	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
 		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
 
-	if (aliasing)
-		ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
-	else
-		gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
+	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
 
 	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
 
@@ -1430,8 +1522,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
 	return 0;
 }
 
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
-		bool aliasing)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1439,16 +1530,16 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
 	if (INTEL_INFO(dev)->gen < 8)
-		return gen6_ppgtt_init(ppgtt, aliasing);
+		return gen6_ppgtt_init(ppgtt);
 	else
-		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
+		return gen8_ppgtt_init(ppgtt);
 }
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
-	ret = __hw_ppgtt_init(dev, ppgtt, false);
+	ret = __hw_ppgtt_init(dev, ppgtt);
 	if (ret == 0) {
 		kref_init(&ppgtt->ref);
 		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
@@ -1535,32 +1626,11 @@ void i915_ppgtt_release(struct kref *kref)
 	kfree(ppgtt);
 }
 
-static void
-ppgtt_bind_vma(struct i915_vma *vma,
-	       enum i915_cache_level cache_level,
-	       u32 flags)
-{
-	/* Currently applicable only to VLV */
-	if (vma->obj->gt_ro)
-		flags |= PTE_READ_ONLY;
-
-	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
-				cache_level, flags);
-}
-
-static void ppgtt_unbind_vma(struct i915_vma *vma)
-{
-	vma->vm->clear_range(vma->vm,
-			     vma->node.start,
-			     vma->obj->base.size,
-			     true);
-}
-
 extern int intel_iommu_gfx_mapped;
 /* Certain Gen5 chipsets require require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
-static inline bool needs_idle_maps(struct drm_device *dev)
+static bool needs_idle_maps(struct drm_device *dev)
 {
 #ifdef CONFIG_INTEL_IOMMU
 	/* Query intel_iommu to see if we need the workaround. Presumably that
@@ -1653,67 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 	i915_ggtt_flush(dev_priv);
 }
 
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
-
-	i915_check_and_clear_faults(dev);
-
-	/* First fill our portion of the GTT with scratch pages */
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       dev_priv->gtt.base.start,
-				       dev_priv->gtt.base.total,
-				       true);
-
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
-		if (!vma)
-			continue;
-
-		i915_gem_clflush_object(obj, obj->pin_display);
-		/* The bind_vma code tries to be smart about tracking mappings.
-		 * Unfortunately above, we've just wiped out the mappings
-		 * without telling our object about it. So we need to fake it.
-		 *
-		 * Bind is not expected to fail since this is only called on
-		 * resume and assumption is all requirements exist already.
-		 */
-		vma->bound &= ~GLOBAL_BIND;
-		WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
-	}
-
-
-	if (INTEL_INFO(dev)->gen >= 8) {
-		if (IS_CHERRYVIEW(dev))
-			chv_setup_private_ppat(dev_priv);
-		else
-			bdw_setup_private_ppat(dev_priv);
-
-		return;
-	}
-
-	if (USES_PPGTT(dev)) {
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-			/* TODO: Perhaps it shouldn't be gen6 specific */
-
-			struct i915_hw_ppgtt *ppgtt =
-					container_of(vm, struct i915_hw_ppgtt,
-						     base);
-
-			if (i915_is_ggtt(vm))
-				ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-			gen6_write_page_range(dev_priv, &ppgtt->pd,
-					      0, ppgtt->base.total);
-		}
-	}
-
-	i915_ggtt_flush(dev_priv);
-}
-
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
 	if (obj->has_dma_mapping)
@@ -1727,7 +1736,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 {
 #ifdef writeq
 	writeq(pte, addr);
@@ -1872,18 +1881,16 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 	readl(gtt_base);
 }
 
-
-static void i915_ggtt_bind_vma(struct i915_vma *vma,
-			       enum i915_cache_level cache_level,
-			       u32 unused)
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct sg_table *pages,
+				     uint64_t start,
+				     enum i915_cache_level cache_level, u32 unused)
 {
-	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
-	BUG_ON(!i915_is_ggtt(vma->vm));
-	intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
-	vma->bound = GLOBAL_BIND;
+	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
+
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1896,62 +1903,41 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
 	intel_gtt_clear_range(first_entry, num_entries);
 }
 
-static void i915_ggtt_unbind_vma(struct i915_vma *vma)
-{
-	const unsigned int first = vma->node.start >> PAGE_SHIFT;
-	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
-
-	BUG_ON(!i915_is_ggtt(vma->vm));
-	vma->bound = 0;
-	intel_gtt_clear_range(first, size);
-}
-
-static void ggtt_bind_vma(struct i915_vma *vma,
-			  enum i915_cache_level cache_level,
-			  u32 flags)
+static int ggtt_bind_vma(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags)
 {
 	struct drm_device *dev = vma->vm->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct sg_table *pages = obj->pages;
+	u32 pte_flags = 0;
+	int ret;
+
+	ret = i915_get_ggtt_vma_pages(vma);
+	if (ret)
+		return ret;
+	pages = vma->ggtt_view.pages;
 
 	/* Currently applicable only to VLV */
 	if (obj->gt_ro)
-		flags |= PTE_READ_ONLY;
+		pte_flags |= PTE_READ_ONLY;
 
-	if (i915_is_ggtt(vma->vm))
-		pages = vma->ggtt_view.pages;
 
-	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
-	 * or we have a global mapping already but the cacheability flags have
-	 * changed, set the global PTEs.
-	 *
-	 * If there is an aliasing PPGTT it is anecdotally faster, so use that
-	 * instead if none of the above hold true.
-	 *
-	 * NB: A global mapping should only be needed for special regions like
-	 * "gtt mappable", SNB errata, or if specified via special execbuf
-	 * flags. At all other times, the GPU will use the aliasing PPGTT.
-	 */
 	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
-		if (!(vma->bound & GLOBAL_BIND) ||
-		    (cache_level != obj->cache_level)) {
-			vma->vm->insert_entries(vma->vm, pages,
-						vma->node.start,
-						cache_level, flags);
-			vma->bound |= GLOBAL_BIND;
-		}
+		vma->vm->insert_entries(vma->vm, pages,
+					vma->node.start,
+					cache_level, pte_flags);
 	}
 
-	if (dev_priv->mm.aliasing_ppgtt &&
-	    (!(vma->bound & LOCAL_BIND) ||
-	     (cache_level != obj->cache_level))) {
+	if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
 					    pages,
 					    vma->node.start,
-					    cache_level, flags);
-		vma->bound |= LOCAL_BIND;
+					    cache_level, pte_flags);
 	}
+
+	return 0;
 }
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
@@ -1965,16 +1951,14 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 				     vma->node.start,
 				     obj->base.size,
 				     true);
-		vma->bound &= ~GLOBAL_BIND;
 	}
 
-	if (vma->bound & LOCAL_BIND) {
+	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.clear_range(&appgtt->base,
 					 vma->node.start,
 					 obj->base.size,
 					 true);
-		vma->bound &= ~LOCAL_BIND;
 	}
 }
 
@@ -2083,12 +2067,27 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 		if (!ppgtt)
 			return -ENOMEM;
 
-		ret = __hw_ppgtt_init(dev, ppgtt, true);
+		ret = __hw_ppgtt_init(dev, ppgtt);
 		if (ret) {
+			ppgtt->base.cleanup(&ppgtt->base);
 			kfree(ppgtt);
 			return ret;
 		}
 
+		if (ppgtt->base.allocate_va_range)
+			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
+							    ppgtt->base.total);
+		if (ret) {
+			ppgtt->base.cleanup(&ppgtt->base);
+			kfree(ppgtt);
+			return ret;
+		}
+
+		ppgtt->base.clear_range(&ppgtt->base,
+					ppgtt->base.start,
+					ppgtt->base.total,
+					true);
+
 		dev_priv->mm.aliasing_ppgtt = ppgtt;
 	}
 
@@ -2164,14 +2163,14 @@ static void teardown_scratch_page(struct drm_device *dev)
 	__free_page(page);
 }
 
-static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
 	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
 	return snb_gmch_ctl << 20;
}
 
-static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 {
 	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
@@ -2187,7 +2186,7 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 	return bdw_gmch_ctl << 20;
 }
 
-static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
 {
 	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
 	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
@@ -2198,14 +2197,14 @@ static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
 	return 0;
 }
 
-static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
+static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
 	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
 	return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
-static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
+static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
 {
 	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
 	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
@@ -2253,7 +2252,17 @@ static int ggtt_probe_common(struct drm_device *dev,
 	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
 		(pci_resource_len(dev->pdev, 0) / 2);
 
-	dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+	/*
+	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
+	 * dropped. For WC mappings in general we have 64 byte burst writes
+	 * when the WC buffer is flushed, so we can't use it, but have to
+	 * resort to an uncached mapping. The WC issue is easily caught by the
+	 * readback check when writing GTT PTE entries.
+	 */
+	if (IS_BROXTON(dev))
+		dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+	else
+		dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
 	if (!dev_priv->gtt.gsm) {
 		DRM_ERROR("Failed to map the gtt page table\n");
 		return -ENOMEM;
@@ -2375,7 +2384,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
 	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
 		chv_setup_private_ppat(dev_priv);
 	else
 		bdw_setup_private_ppat(dev_priv);
@@ -2384,6 +2393,8 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
 	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
 	return ret;
 }
@@ -2424,6 +2435,8 @@ static int gen6_gmch_probe(struct drm_device *dev,
 
 	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
 	return ret;
 }
@@ -2455,7 +2468,10 @@ static int i915_gmch_probe(struct drm_device *dev,
 
 	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
 	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
 	if (unlikely(dev_priv->gtt.do_idle_maps))
 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -2523,6 +2539,59 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	return 0;
 }
 
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	i915_check_and_clear_faults(dev);
+
+	/* First fill our portion of the GTT with scratch pages */
+	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+				       dev_priv->gtt.base.start,
+				       dev_priv->gtt.base.total,
+				       true);
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+							   &dev_priv->gtt.base);
+		if (!vma)
+			continue;
+
+		i915_gem_clflush_object(obj, obj->pin_display);
+		WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
+	}
+
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+			chv_setup_private_ppat(dev_priv);
+		else
+			bdw_setup_private_ppat(dev_priv);
+
+		return;
+	}
+
+	if (USES_PPGTT(dev)) {
+		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+			/* TODO: Perhaps it shouldn't be gen6 specific */
+
+			struct i915_hw_ppgtt *ppgtt =
+					container_of(vm, struct i915_hw_ppgtt,
+						     base);
+
+			if (i915_is_ggtt(vm))
+				ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+			gen6_write_page_range(dev_priv, &ppgtt->pd,
+					      0, ppgtt->base.total);
+		}
+	}
+
+	i915_ggtt_flush(dev_priv);
+}
+
 static struct i915_vma *
 __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		      struct i915_address_space *vm,
@@ -2532,7 +2601,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
 		return ERR_PTR(-EINVAL);
 
-	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+
+	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -2542,22 +2612,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	vma->vm = vm;
 	vma->obj = obj;
 
-	if (INTEL_INFO(vm->dev)->gen >= 6) {
-		if (i915_is_ggtt(vm)) {
-			vma->ggtt_view = *ggtt_view;
-
-			vma->unbind_vma = ggtt_unbind_vma;
-			vma->bind_vma = ggtt_bind_vma;
-		} else {
-			vma->unbind_vma = ppgtt_unbind_vma;
-			vma->bind_vma = ppgtt_bind_vma;
-		}
-	} else {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (i915_is_ggtt(vm))
 		vma->ggtt_view = *ggtt_view;
-		vma->unbind_vma = i915_ggtt_unbind_vma;
-		vma->bind_vma = i915_ggtt_bind_vma;
-	}
 
 	list_add_tail(&vma->vma_link, &obj->vma_list);
 	if (!i915_is_ggtt(vm))
@@ -2702,7 +2758,7 @@ err_st_alloc:
 	return ERR_PTR(ret);
 }
 
-static inline int
+static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
 	int ret = 0;
@@ -2746,14 +2802,44 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags)
 {
-	if (i915_is_ggtt(vma->vm)) {
-		int ret = i915_get_ggtt_vma_pages(vma);
+	int ret;
+	u32 bind_flags;
+
+	if (WARN_ON(flags == 0))
+		return -EINVAL;
+
+	bind_flags = 0;
+	if (flags & PIN_GLOBAL)
+		bind_flags |= GLOBAL_BIND;
+	if (flags & PIN_USER)
+		bind_flags |= LOCAL_BIND;
+
+	if (flags & PIN_UPDATE)
+		bind_flags |= vma->bound;
+	else
+		bind_flags &= ~vma->bound;
+
+	if (bind_flags == 0)
+		return 0;
+
+	if (vma->bound == 0 && vma->vm->allocate_va_range) {
+		trace_i915_va_alloc(vma->vm,
+				    vma->node.start,
+				    vma->node.size,
+				    VM_TO_TRACE_NAME(vma->vm));
+
+		ret = vma->vm->allocate_va_range(vma->vm,
+						 vma->node.start,
+						 vma->node.size);
 		if (ret)
 			return ret;
 	}
 
-	vma->bind_vma(vma, cache_level, flags);
+	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+	if (ret)
+		return ret;
+
+	vma->bound |= bind_flags;
 
 	return 0;
 }
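The gen8_alloc_va_range() hunks above follow a two-phase allocate-then-commit pattern: temporary bitmaps record which page directories and page tables a call newly allocated, all allocations are attempted first, and only after full success are the persistent used_pdes/used_pdpes bitmaps and hardware PDEs updated; on a partial failure the temporary bitmaps drive the unwind so that pre-existing tables are left untouched. The self-contained C sketch below shows the same pattern outside the kernel; all names (alloc_range, struct dir, NENTRIES) are hypothetical and it is a simplification, not the patch's code.

/* Illustrative sketch: two-phase allocate-then-commit with an unwind bitmap. */
#include <stdbool.h>
#include <stdlib.h>

#define NENTRIES 512

struct dir {
	void *entry[NENTRIES];	/* allocated objects (think: page tables) */
	bool used[NENTRIES];	/* committed allocations */
};

/* Allocate every missing entry in [first, first + count). New entries are
 * recorded in a scratch bitmap so a partial failure can be unwound without
 * freeing entries that existed before this call. */
static int alloc_range(struct dir *d, unsigned first, unsigned count)
{
	bool newly_allocated[NENTRIES] = { false };
	unsigned i;

	/* Phase 1: do the allocations first so we can easily bail out. */
	for (i = first; i < first + count; i++) {
		if (d->entry[i])	/* don't reallocate */
			continue;
		d->entry[i] = malloc(64);
		if (!d->entry[i])
			goto unwind;
		newly_allocated[i] = true;
	}

	/* Phase 2: everything succeeded; commit to the persistent bitmap. */
	for (i = first; i < first + count; i++)
		d->used[i] = true;
	return 0;

unwind:
	/* Free only what this call allocated; earlier state is preserved. */
	for (i = 0; i < NENTRIES; i++) {
		if (newly_allocated[i]) {
			free(d->entry[i]);
			d->entry[i] = NULL;
		}
	}
	return -1;
}

The kernel version keeps its scratch bitmaps in separately kmalloc'ed storage (alloc_gen8_temp_bitmaps()) because it needs one bitmap per possible page directory, and it walks them with for_each_set_bit() rather than a plain loop.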