Diffstat (limited to 'mm')
 mm/huge_memory.c    |  2 +-
 mm/memcontrol.c     | 11 +++++++++++
 mm/memory.c         |  4 ++--
 mm/mmap.c           |  3 +++
 mm/page-writeback.c |  4 ++--
 mm/shrinker.c       |  2 +-
 mm/vmscan.c         | 13 +++++++++----
 7 files changed, 29 insertions(+), 10 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4f542444a91f..86ee29b5c39c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2823,7 +2823,7 @@ void folio_undo_large_rmappable(struct folio *folio)
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
-		list_del(&folio->_deferred_list);
+		list_del_init(&folio->_deferred_list);
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b226090fd906..651b86ff03dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7543,6 +7543,17 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
 
 	/* Transfer the charge and the css ref */
 	commit_charge(new, memcg);
+	/*
+	 * If the old folio is a large folio and is in the split queue, it needs
+	 * to be removed from the split queue now, in case getting an incorrect
+	 * split queue in destroy_large_folio() after the memcg of the old folio
+	 * is cleared.
+	 *
+	 * In addition, the old folio is about to be freed after migration, so
+	 * removing from the split queue a bit earlier seems reasonable.
+	 */
+	if (folio_test_large(old) && folio_test_large_rmappable(old))
+		folio_undo_large_rmappable(old);
 	old->memcg_data = 0;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 5c757fba8858..6e0712d06cd4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3624,8 +3624,8 @@ EXPORT_SYMBOL_GPL(unmap_mapping_pages);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows)
 {
-	pgoff_t hba = holebegin >> PAGE_SHIFT;
-	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
+	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	/* Check for overflow. */
 	if (sizeof(holelen) > sizeof(hlen)) {
diff --git a/mm/mmap.c b/mm/mmap.c
index 1971bfffcc03..aa82eec17489 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1829,6 +1829,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		 */
 		pgoff = 0;
 		get_area = shmem_get_unmapped_area;
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		/* Ensures that larger anonymous mappings are THP aligned. */
+		get_area = thp_get_unmapped_area;
 	}
 
 	addr = get_area(file, addr, len, pgoff, flags);
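The huge_memory.c and memcontrol.c hunks above hinge on the same invariant: a folio that is not queued for deferred split must have a self-linked (i.e. empty) _deferred_list, because later paths decide whether to unlink it by testing list_empty(). A plain list_del() breaks that invariant. Below is a minimal userspace sketch of the difference; the list helpers re-implement the behavior of include/linux/list.h purely for illustration and are not the kernel code.

#include <stdbool.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

/* an entry counts as "not queued" when it points at itself */
static bool list_empty(const struct list_head *h)
{
        return h->next == h;
}

static void list_add(struct list_head *e, struct list_head *head)
{
        e->next = head->next;
        e->prev = head;
        head->next->prev = e;
        head->next = e;
}

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        /* the kernel poisons e->next/e->prev here; either way a later
         * list_empty(e) is NOT reliably true after plain list_del() */
}

static void list_del_init(struct list_head *e)
{
        list_del(e);
        INIT_LIST_HEAD(e);      /* entry now reads as "not queued" */
}

int main(void)
{
        struct list_head split_queue, deferred;

        INIT_LIST_HEAD(&split_queue);
        INIT_LIST_HEAD(&deferred);      /* stands in for folio->_deferred_list */

        list_add(&deferred, &split_queue);      /* as deferred_split_folio() */

        /* first unlink, as in the patched folio_undo_large_rmappable() */
        list_del_init(&deferred);

        /* a later pass keys off list_empty() and can skip the unlink */
        printf("queued after unlink? %s\n",
               list_empty(&deferred) ? "yes" : "no");
        return 0;
}

This prints "queued after unlink? no". With list_del() in place of list_del_init(), the entry would still look queued, and a second unlink, e.g. from the migration path the memcontrol.c hunk adds, could operate on a stale or wrong split queue.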
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ee2fd6a6af40..05e5c425b3ff 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -692,7 +692,6 @@ static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ra
 
 	if (min_ratio > 100 * BDI_RATIO_SCALE)
 		return -EINVAL;
-	min_ratio *= BDI_RATIO_SCALE;
 
 	spin_lock_bh(&bdi_lock);
 	if (min_ratio > bdi->max_ratio) {
@@ -729,7 +728,8 @@ static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ra
 		ret = -EINVAL;
 	} else {
 		bdi->max_ratio = max_ratio;
-		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
+		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
+					(100 * BDI_RATIO_SCALE);
 	}
 	spin_unlock_bh(&bdi_lock);
diff --git a/mm/shrinker.c b/mm/shrinker.c
index dd91eab43ed3..dc5d2a6fcfc4 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -126,7 +126,7 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
 		if (new_nr_max <= old->map_nr_max)
 			continue;
 
-		new = kvmalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
+		new = kvzalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
 		if (!new)
 			return -ENOMEM;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9dd8977de5a2..bba207f41b14 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3955,6 +3955,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 	int young = 0;
 	pte_t *pte = pvmw->pte;
 	unsigned long addr = pvmw->address;
+	struct vm_area_struct *vma = pvmw->vma;
 	struct folio *folio = pfn_folio(pvmw->pfn);
 	bool can_swap = !folio_is_file_lru(folio);
 	struct mem_cgroup *memcg = folio_memcg(folio);
@@ -3969,11 +3970,15 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 	if (spin_is_contended(pvmw->ptl))
 		return;
 
+	/* exclude special VMAs containing anon pages from COW */
+	if (vma->vm_flags & VM_SPECIAL)
+		return;
+
 	/* avoid taking the LRU lock under the PTL when possible */
 	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
 
-	start = max(addr & PMD_MASK, pvmw->vma->vm_start);
-	end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+	start = max(addr & PMD_MASK, vma->vm_start);
+	end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;
 
 	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
 		if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
@@ -3998,7 +4003,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 		unsigned long pfn;
 		pte_t ptent = ptep_get(pte + i);
 
-		pfn = get_pte_pfn(ptent, pvmw->vma, addr);
+		pfn = get_pte_pfn(ptent, vma, addr);
 		if (pfn == -1)
 			continue;
 
@@ -4009,7 +4014,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 		if (!folio)
 			continue;
 
-		if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
+		if (!ptep_test_and_clear_young(vma, addr, pte + i))
 			VM_WARN_ON_ONCE(true);
 
 		young++;
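Both page-writeback.c hunks above correct for BDI_RATIO_SCALE: __bdi_set_min_ratio() now receives an already-scaled min_ratio, so scaling it again double-counted the factor, and __bdi_set_max_ratio() must divide the stored, scaled max_ratio by 100 * BDI_RATIO_SCALE so that max_prop_frac lands inside FPROP_FRAC_BASE. A standalone sanity check of that arithmetic follows; the constants are meant to mirror the kernel's definitions but should be treated as assumptions of this sketch, not authoritative values.

#include <stdio.h>

#define BDI_RATIO_SCALE         10000UL
#define FPROP_FRAC_SHIFT        10
#define FPROP_FRAC_BASE         (1UL << FPROP_FRAC_SHIFT)

int main(void)
{
        /* a user writes "50" (percent); bdi->max_ratio stores it pre-scaled */
        unsigned long max_ratio = 50 * BDI_RATIO_SCALE;

        /* old divisor: forgets that max_ratio already carries the scale */
        unsigned long buggy = (FPROP_FRAC_BASE * max_ratio) / 100;

        /* fixed divisor: removes the scale along with the percentage */
        unsigned long fixed = (FPROP_FRAC_BASE * max_ratio) /
                                        (100 * BDI_RATIO_SCALE);

        printf("base=%lu buggy=%lu fixed=%lu\n",
               FPROP_FRAC_BASE, buggy, fixed);
        return 0;
}

With the old divisor a 50% cap yields max_prop_frac = 5120000 against a base of 1024, so the cap could never bind; the fixed divisor yields 512, exactly half the base, as intended.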