Diffstat (limited to 'mm/damon/core.c')
-rw-r--r--  mm/damon/core.c  259
1 file changed, 243 insertions(+), 16 deletions(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 0776452a1abb..384935ef4e65 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -14,6 +14,7 @@
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>
@@ -266,7 +267,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
}
struct damos_filter *damos_new_filter(enum damos_filter_type type,
- bool matching)
+ bool matching, bool allow)
{
struct damos_filter *filter;
@@ -275,6 +276,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type,
return NULL;
filter->type = type;
filter->matching = matching;
+ filter->allow = allow;
INIT_LIST_HEAD(&filter->list);
return filter;
}
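For illustration only (not part of this patch): with the new third argument, a caller that wants a scheme to act only on anonymous pages could build an allow filter as sketched below. 'scheme' is an assumed caller-side &struct damos, and damos_add_filter() is the existing list-insertion helper.

	/* hypothetical helper: let only anonymous pages pass 'scheme' */
	static int example_add_anon_allow_filter(struct damos *scheme)
	{
		struct damos_filter *filter;

		/* match anonymous pages (matching == true), allow them (allow == true) */
		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
		if (!filter)
			return -ENOMEM;
		damos_add_filter(scheme, filter);
		return 0;
	}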
@@ -371,6 +373,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
* or damon_attrs are updated.
*/
scheme->next_apply_sis = 0;
+ scheme->walk_completed = false;
INIT_LIST_HEAD(&scheme->filters);
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
@@ -504,6 +507,8 @@ struct damon_ctx *damon_new_ctx(void)
ctx->next_ops_update_sis = 0;
mutex_init(&ctx->kdamond_lock);
+ mutex_init(&ctx->call_control_lock);
+ mutex_init(&ctx->walk_control_lock);
ctx->attrs.min_nr_regions = 10;
ctx->attrs.max_nr_regions = 1000;
@@ -803,7 +808,8 @@ static int damos_commit_filters(struct damos *dst, struct damos *src)
continue;
new_filter = damos_new_filter(
- src_filter->type, src_filter->matching);
+ src_filter->type, src_filter->matching,
+ src_filter->allow);
if (!new_filter)
return -ENOMEM;
damos_commit_filter_arg(new_filter, src_filter);
@@ -1162,6 +1168,94 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
return err;
}
+static bool damon_is_running(struct damon_ctx *ctx)
+{
+ bool running;
+
+ mutex_lock(&ctx->kdamond_lock);
+ running = ctx->kdamond != NULL;
+ mutex_unlock(&ctx->kdamond_lock);
+ return running;
+}
+
+/**
+ * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
+ * @ctx: DAMON context to call the function for.
+ * @control: Control variable of the call request.
+ *
+ * Ask the DAMON worker thread (kdamond) of @ctx to call a function with an
+ * argument, which are respectively passed via &damon_call_control->fn and
+ * &damon_call_control->data of @control, and wait until the kdamond finishes
+ * handling the request.
+ *
+ * The kdamond executes the function with the argument in the main loop, just
+ * after the sampling of an iteration has finished.  The function can hence
+ * safely access the internal data of the &struct damon_ctx without additional
+ * synchronization. The return value of the function will be saved in
+ * &damon_call_control->return_code.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
+{
+ init_completion(&control->completion);
+ control->canceled = false;
+
+ mutex_lock(&ctx->call_control_lock);
+ if (ctx->call_control) {
+ mutex_unlock(&ctx->call_control_lock);
+ return -EBUSY;
+ }
+ ctx->call_control = control;
+ mutex_unlock(&ctx->call_control_lock);
+ if (!damon_is_running(ctx))
+ return -EINVAL;
+ wait_for_completion(&control->completion);
+ if (control->canceled)
+ return -ECANCELED;
+ return 0;
+}
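A caller-side sketch of damon_call(), for illustration only (not part of this patch). It assumes the &struct damon_call_control layout this patch uses (fn, data, return_code); example_fn() and example_call() are hypothetical names.

	/* hypothetical callback: runs on the kdamond thread */
	static int example_fn(void *data)
	{
		int *counter = data;

		(*counter)++;	/* no extra locking needed on the kdamond */
		return 0;
	}

	/* hypothetical caller */
	static int example_call(struct damon_ctx *ctx)
	{
		int counter = 0;
		int err;
		struct damon_call_control control = {
			.fn = example_fn,
			.data = &counter,
		};

		err = damon_call(ctx, &control);
		if (err)
			return err;	/* -EBUSY, -EINVAL, or -ECANCELED */
		/* control.return_code holds example_fn()'s return value */
		return control.return_code;
	}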
+
+/**
+ * damos_walk() - Invoke a given function while DAMOS walks regions.
+ * @ctx: DAMON context to call the function for.
+ * @control: Control variable of the walk request.
+ *
+ * Ask the DAMON worker thread (kdamond) of @ctx to call a function for each
+ * region that the kdamond will apply a DAMOS action to, and wait until the
+ * kdamond finishes handling the request.
+ *
+ * The kdamond executes the given function in the main loop, for each region,
+ * just after it has applied any DAMOS actions of @ctx to it.  The invocations
+ * are made only within one &damos->apply_interval_us of the damos_walk()
+ * call, for each scheme.  The given callback function can hence safely access
+ * the internal data of &struct damon_ctx and the &struct damon_region that
+ * each scheme will apply the action to in the next interval, without
+ * additional synchronization against the kdamond.  Once every scheme of @ctx
+ * has passed at least one &damos->apply_interval_us, the kdamond marks the
+ * request as completed so that damos_walk() can wake up and return.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
+{
+ init_completion(&control->completion);
+ control->canceled = false;
+ mutex_lock(&ctx->walk_control_lock);
+ if (ctx->walk_control) {
+ mutex_unlock(&ctx->walk_control_lock);
+ return -EBUSY;
+ }
+ ctx->walk_control = control;
+ mutex_unlock(&ctx->walk_control_lock);
+ if (!damon_is_running(ctx))
+ return -EINVAL;
+ wait_for_completion(&control->completion);
+ if (control->canceled)
+ return -ECANCELED;
+ return 0;
+}
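Similarly, a hypothetical damos_walk() caller (illustration only, not part of this patch); the walk_fn signature is assumed from the damos_walk_call_walk() call site below, with void return and sz_filter_passed as the last argument.

	/* hypothetical callback: totals the size of all walked regions */
	static void example_walk_fn(void *data, struct damon_ctx *ctx,
			struct damon_target *t, struct damon_region *r,
			struct damos *s, unsigned long sz_filter_passed)
	{
		unsigned long *total = data;

		*total += damon_sz_region(r);
	}

	/* hypothetical caller */
	static int example_walk(struct damon_ctx *ctx)
	{
		unsigned long total = 0;
		struct damos_walk_control control = {
			.walk_fn = example_walk_fn,
			.data = &total,
		};

		/* on 0 return, every scheme has walked its regions once */
		return damos_walk(ctx, &control);
	}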
+
/*
* Reset the aggregated monitoring results ('nr_accesses' of each region).
*/
@@ -1272,16 +1366,18 @@ static bool damos_skip_charged_region(struct damon_target *t,
}
static void damos_update_stat(struct damos *s,
- unsigned long sz_tried, unsigned long sz_applied)
+ unsigned long sz_tried, unsigned long sz_applied,
+ unsigned long sz_ops_filter_passed)
{
s->stat.nr_tried++;
s->stat.sz_tried += sz_tried;
if (sz_applied)
s->stat.nr_applied++;
s->stat.sz_applied += sz_applied;
+ s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
}
-static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
+static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
struct damon_region *r, struct damos_filter *filter)
{
bool matched = false;
@@ -1334,13 +1430,103 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
{
struct damos_filter *filter;
+ s->core_filters_allowed = false;
damos_for_each_filter(filter, s) {
- if (__damos_filter_out(ctx, t, r, filter))
- return true;
+ if (damos_filter_match(ctx, t, r, filter)) {
+ if (filter->allow)
+ s->core_filters_allowed = true;
+ return !filter->allow;
+ }
}
return false;
}
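Note the first-match-wins semantics of the reworked loop above: filters are evaluated in installation order, and the first filter whose condition matches decides the region's fate. A matching allow filter admits the region (recording that in s->core_filters_allowed) while a matching deny filter rejects it; a region matching no filter falls through and is not filtered out.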
+/*
+ * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
+ * @ctx: The context of &damon_ctx->walk_control.
+ * @t: The monitoring target of @r that @s will be applied to.
+ * @r: The region of @t that @s will be applied to.
+ * @s: The scheme of @ctx that will be applied to @r.
+ * @sz_filter_passed: Size of the part of @r that passed the filters handled
+ *	by the operations set layer.
+ *
+ * This function is called from the kdamond whenever it has asked the
+ * operations set to apply a DAMOS scheme action to a region.  If a DAMOS walk
+ * request has been installed by damos_walk() and not yet uninstalled, invoke
+ * its callback for the region.
+ */
+static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
+ struct damon_region *r, struct damos *s,
+ unsigned long sz_filter_passed)
+{
+ struct damos_walk_control *control;
+
+ mutex_lock(&ctx->walk_control_lock);
+ control = ctx->walk_control;
+ mutex_unlock(&ctx->walk_control_lock);
+ if (!control)
+ return;
+ control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
+}
+
+/*
+ * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
+ * @ctx: The context of &damon_ctx->walk_control.
+ * @s: A scheme of @ctx that all walks are now done.
+ *
+ * This function is called when the kdamond has finished applying the action of
+ * a DAMOS scheme to all regions that are eligible for the given
+ * &damos->apply_interval_us.  If every scheme of @ctx including @s has now
+ * finished walking for at least one &damos->apply_interval_us, this function
+ * marks the handling of the given DAMOS walk request as done, so that
+ * damos_walk() can wake up and return.
+ */
+static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
+{
+ struct damos *siter;
+ struct damos_walk_control *control;
+
+ mutex_lock(&ctx->walk_control_lock);
+ control = ctx->walk_control;
+ mutex_unlock(&ctx->walk_control_lock);
+ if (!control)
+ return;
+
+ s->walk_completed = true;
+ /* if all schemes completed, signal completion to walker */
+ damon_for_each_scheme(siter, ctx) {
+ if (!siter->walk_completed)
+ return;
+ }
+ complete(&control->completion);
+ mutex_lock(&ctx->walk_control_lock);
+ ctx->walk_control = NULL;
+ mutex_unlock(&ctx->walk_control_lock);
+}
+
+/*
+ * damos_walk_cancel() - Cancel the current DAMOS walk request.
+ * @ctx: The context of &damon_ctx->walk_control.
+ *
+ * This function is called when @ctx is deactivated by DAMOS watermarks, when a
+ * DAMOS walk is requested but there is no DAMOS scheme to walk for, or when
+ * the kdamond is already out of the main loop and therefore going to be
+ * terminated, and hence cannot continue the walks.  This function therefore
+ * marks the walk request as canceled, so that damos_walk() can wake up and
+ * return.
+ */
+static void damos_walk_cancel(struct damon_ctx *ctx)
+{
+ struct damos_walk_control *control;
+
+ mutex_lock(&ctx->walk_control_lock);
+ control = ctx->walk_control;
+ mutex_unlock(&ctx->walk_control_lock);
+
+ if (!control)
+ return;
+ control->canceled = true;
+ complete(&control->completion);
+ mutex_lock(&ctx->walk_control_lock);
+ ctx->walk_control = NULL;
+ mutex_unlock(&ctx->walk_control_lock);
+}
+
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
struct damon_region *r, struct damos *s)
{
@@ -1348,6 +1534,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
unsigned long sz = damon_sz_region(r);
struct timespec64 begin, end;
unsigned long sz_applied = 0;
+ unsigned long sz_ops_filter_passed = 0;
int err = 0;
/*
* We plan to support multiple context per kdamond, as DAMON sysfs
@@ -1393,8 +1580,10 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
if (!err) {
trace_damos_before_apply(cidx, sidx, tidx, r,
damon_nr_regions(t), do_trace);
- sz_applied = c->ops.apply_scheme(c, t, r, s);
+ sz_applied = c->ops.apply_scheme(c, t, r, s,
+ &sz_ops_filter_passed);
}
+ damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
ktime_get_coarse_ts64(&end);
quota->total_charged_ns += timespec64_to_ns(&end) -
timespec64_to_ns(&begin);
@@ -1408,7 +1597,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
r->age = 0;
update_stat:
- damos_update_stat(s, sz, sz_applied);
+ damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
}
static void damon_do_apply_schemes(struct damon_ctx *c,
@@ -1550,7 +1739,7 @@ static unsigned long damos_quota_score(struct damos_quota *quota)
static void damos_set_effective_quota(struct damos_quota *quota)
{
unsigned long throughput;
- unsigned long esz;
+ unsigned long esz = ULONG_MAX;
if (!quota->ms && list_empty(&quota->goals)) {
quota->esz = quota->sz;
@@ -1572,10 +1761,7 @@ static void damos_set_effective_quota(struct damos_quota *quota)
quota->total_charged_ns;
else
throughput = PAGE_SIZE * 1024;
- if (!list_empty(&quota->goals))
- esz = min(throughput * quota->ms, esz);
- else
- esz = throughput * quota->ms;
+ esz = min(throughput * quota->ms, esz);
}
if (quota->sz && quota->sz < esz)
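As a worked example of the computation above (a reading of the surrounding code, not text from the patch): with 4 KiB pages the fallback throughput estimate is PAGE_SIZE * 1024 = 4 MiB per millisecond, so a time quota of quota->ms == 10 caps esz at 40 MiB. Initializing esz to ULONG_MAX is what lets the single min() replace the old if/else: when no quota goal has stored a smaller value in esz earlier in the function, min() simply picks throughput * quota->ms.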
@@ -1666,6 +1852,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
damon_for_each_scheme(s, c) {
if (c->passed_sample_intervals < s->next_apply_sis)
continue;
+ damos_walk_complete(c, s);
s->next_apply_sis = c->passed_sample_intervals +
(s->apply_interval_us ? s->apply_interval_us :
c->attrs.aggr_interval) / sample_interval;
@@ -1894,9 +2081,8 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
if (scheme->wmarks.activated)
pr_debug("deactivate a scheme (%d) for %s wmark\n",
- scheme->action,
- metric > scheme->wmarks.high ?
- "high" : "low");
+ scheme->action,
+ str_high_low(metric > scheme->wmarks.high));
scheme->wmarks.activated = false;
return scheme->wmarks.interval;
}
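str_high_low() comes from the <linux/string_choices.h> header this patch adds to the includes; it returns the string "high" for a true argument and "low" otherwise, so the printed message is unchanged while the open-coded ternary goes away.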
@@ -1920,6 +2106,39 @@ static void kdamond_usleep(unsigned long usecs)
usleep_range_idle(usecs, usecs + 1);
}
+/*
+ * kdamond_call() - handle damon_call_control.
+ * @ctx: The &struct damon_ctx of the kdamond.
+ * @cancel: Whether to cancel the invocation of the function.
+ *
+ * If there is a &struct damon_call_control request that was registered via
+ * damon_call() on @ctx, do or cancel the invocation of the function depending
+ * on @cancel.  @cancel is set when the kdamond is deactivated by DAMOS
+ * watermarks, or when the kdamond is already out of the main loop and
+ * therefore will be terminated.
+ */
+static void kdamond_call(struct damon_ctx *ctx, bool cancel)
+{
+ struct damon_call_control *control;
+ int ret = 0;
+
+ mutex_lock(&ctx->call_control_lock);
+ control = ctx->call_control;
+ mutex_unlock(&ctx->call_control_lock);
+ if (!control)
+ return;
+ if (cancel) {
+ control->canceled = true;
+ } else {
+ ret = control->fn(control->data);
+ control->return_code = ret;
+ }
+ complete(&control->completion);
+ mutex_lock(&ctx->call_control_lock);
+ ctx->call_control = NULL;
+ mutex_unlock(&ctx->call_control_lock);
+}
+
/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
@@ -1944,6 +2163,8 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
if (ctx->callback.after_wmarks_check &&
ctx->callback.after_wmarks_check(ctx))
break;
+ kdamond_call(ctx, true);
+ damos_walk_cancel(ctx);
}
return -EBUSY;
}
@@ -2014,6 +2235,7 @@ static int kdamond_fn(void *data)
if (ctx->callback.after_sampling &&
ctx->callback.after_sampling(ctx))
break;
+ kdamond_call(ctx, false);
kdamond_usleep(sample_interval);
ctx->passed_sample_intervals++;
@@ -2036,6 +2258,8 @@ static int kdamond_fn(void *data)
*/
if (!list_empty(&ctx->schemes))
kdamond_apply_schemes(ctx);
+ else
+ damos_walk_cancel(ctx);
sample_interval = ctx->attrs.sample_interval ?
ctx->attrs.sample_interval : 1;
@@ -2075,6 +2299,9 @@ done:
ctx->kdamond = NULL;
mutex_unlock(&ctx->kdamond_lock);
+ kdamond_call(ctx, true);
+ damos_walk_cancel(ctx);
+
mutex_lock(&damon_lock);
nr_running_ctxs--;
if (!nr_running_ctxs && running_exclusive_ctxs)