From: Bijan Tabatabai <bijantabatab@xxxxxxxxxx>

damos->migrate_dests provides a list of nodes the migrate_{hot,cold}
actions should migrate to, as well as the weights that specify the ratio
in which pages should be migrated to each destination node.

This patch interleaves pages in the migrate_{hot,cold} actions according
to the information provided in damos->migrate_dests when it is set. The
interleaving algorithm is similar to the one used in
weighted_interleave_nid(). If damos->migrate_dests is not provided, the
actions migrate pages to the node specified in damos->target_nid as
before.

Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@xxxxxxxxxx>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@xxxxxxxxxx>
Signed-off-by: Bijan Tabatabai <bijantabatab@xxxxxxxxxx>
---
 mm/damon/vaddr.c | 114 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 97 insertions(+), 17 deletions(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 5cdfdc47c5ff..5f230a427fdc 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -611,11 +611,76 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
+struct damos_va_migrate_private {
+	struct list_head *migration_lists;
+	struct damos *scheme;
+};
+
+/*
+ * Place the given folio in the migration_list corresponding to where the folio
+ * should be migrated.
+ *
+ * The algorithm used here is similar to weighted_interleave_nid()
+ */
+static void damos_va_migrate_folio(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr,
+		struct damos_migrate_dests *dests,
+		struct list_head *migration_lists)
+{
+	pgoff_t ilx;
+	int order;
+	unsigned int target;
+	unsigned int weight_total = 0;
+	int i;
+
+	/*
+	 * If dests is empty, there is only one migration list corresponding
+	 * to s->target_nid.
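+	 * Otherwise, pick the list whose cumulative weight range covers the
+	 * folio's interleave index, as weighted_interleave_nid() does.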
+	 */
+	if (!dests->nr_dests) {
+		i = 0;
+		goto isolate;
+	}
+
+	order = folio_order(folio);
+	ilx = vma->vm_pgoff >> order;
+	ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
+
+	for (i = 0; i < dests->nr_dests; i++)
+		weight_total += dests->weight_arr[i];
+
+	/* If the total weights are somehow 0, don't migrate at all */
+	if (!weight_total)
+		return;
+
+	target = ilx % weight_total;
+	for (i = 0; i < dests->nr_dests; i++) {
+		if (target < dests->weight_arr[i])
+			break;
+		target -= dests->weight_arr[i];
+	}
+
+	/* No index being chosen indicates a mistake in the algorithm */
+	if (i == dests->nr_dests) {
+		WARN_ONCE(1, "Error determining target interleave node");
+		return;
+	}
+
+isolate:
+	if (!folio_isolate_lru(folio))
+		return;
+
+	list_add(&folio->lru, &migration_lists[i]);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
-	struct list_head *migration_list = walk->private;
+	struct damos_va_migrate_private *priv = walk->private;
+	struct damos *s = priv->scheme;
+	struct list_head *migration_lists = priv->migration_lists;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	spinlock_t *ptl;
 	pmd_t pmde;
@@ -630,12 +695,8 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 	if (!folio)
 		goto unlock;
 
-	if (!folio_isolate_lru(folio))
-		goto put_folio;
-
-	list_add(&folio->lru, migration_list);
+	damos_va_migrate_folio(folio, walk->vma, addr, dests, migration_lists);
 
-put_folio:
 	folio_put(folio);
 unlock:
 	spin_unlock(ptl);
@@ -648,7 +709,10 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 		unsigned long enxt, struct mm_walk *walk)
 {
-	struct list_head *migration_list = walk->private;
+	struct damos_va_migrate_private *priv = walk->private;
+	struct damos *s = priv->scheme;
+	struct list_head *migration_lists = priv->migration_lists;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	pte_t ptent;
 
@@ -660,12 +724,8 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 	if (!folio)
 		return 0;
 
-	if (!folio_isolate_lru(folio))
-		goto out;
-
-	list_add(&folio->lru, migration_list);
+	damos_va_migrate_folio(folio, walk->vma, addr, dests, migration_lists);
 
-out:
 	folio_put(folio);
 	return 0;
 }
@@ -717,34 +777,54 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 		struct damon_region *r, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
-	LIST_HEAD(folio_list);
+	struct damos_va_migrate_private priv;
 	struct task_struct *task;
 	struct mm_struct *mm;
+	int nr_dests;
+	int nid;
+	bool use_target_nid;
 	unsigned long applied = 0;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct mm_walk_ops walk_ops = {
 		.pmd_entry = damos_va_migrate_pmd_entry,
 		.pte_entry = damos_va_migrate_pte_entry,
 		.walk_lock = PGWALK_RDLOCK,
 	};
 
+	use_target_nid = dests->nr_dests == 0;
+	nr_dests = use_target_nid ? 1 : dests->nr_dests;
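+	/* One folio list per destination node (a single list for target_nid). */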
+	priv.scheme = s;
+	priv.migration_lists = kmalloc_array(nr_dests,
+			sizeof(struct list_head), GFP_KERNEL);
+	if (!priv.migration_lists)
+		return 0;
+
+	for (int i = 0; i < nr_dests; i++)
+		INIT_LIST_HEAD(&priv.migration_lists[i]);
+
 	task = damon_get_task_struct(target);
 	if (!task)
-		return 0;
+		goto free_lists;
 
 	mm = damon_get_mm(target);
 	if (!mm)
 		goto put_task;
 
 	mmap_read_lock(mm);
-	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
+	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
 	mmap_read_unlock(mm);
 	mmput(mm);
 
-	applied = damon_migrate_pages(&folio_list, s->target_nid);
-	cond_resched();
+	for (int i = 0; i < nr_dests; i++) {
+		nid = use_target_nid ? s->target_nid : dests->node_id_arr[i];
+		applied += damon_migrate_pages(&priv.migration_lists[i], nid);
+		cond_resched();
+	}
 
 put_task:
 	put_task_struct(task);
+free_lists:
+	kfree(priv.migration_lists);
 	return applied * PAGE_SIZE;
 }
-- 
2.43.5
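The destination selection above reduces the folio's interleave index modulo
the total weight, then walks the weight array until the remainder falls
within one destination's share. The following is a minimal userspace sketch
of that selection, for reference only; pick_dest() and the 3:1 example
weights are illustrative and not part of the kernel change.

#include <stdio.h>

/*
 * Illustrative model of the weighted selection in damos_va_migrate_folio():
 * the interleave index is reduced modulo the total weight and the
 * destination whose cumulative weight range contains the remainder wins.
 */
static int pick_dest(unsigned long ilx, const unsigned int *weights,
		     int nr_dests)
{
	unsigned int weight_total = 0;
	unsigned int target;
	int i;

	for (i = 0; i < nr_dests; i++)
		weight_total += weights[i];
	if (!weight_total)
		return -1;	/* zero total weight: nothing to migrate */

	target = ilx % weight_total;
	for (i = 0; i < nr_dests; i++) {
		if (target < weights[i])
			return i;
		target -= weights[i];
	}
	return -1;	/* unreachable when the weights are consistent */
}

int main(void)
{
	/* e.g. interleave pages 3:1 across two destination nodes */
	const unsigned int weights[] = { 3, 1 };
	unsigned long ilx;

	for (ilx = 0; ilx < 8; ilx++)
		printf("ilx %lu -> dest index %d\n",
		       ilx, pick_dest(ilx, weights, 2));
	return 0;
}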