mm/damon: accept parallel damon_call() requests
Patch series "mm/damon: remove damon_callback".

damon_callback was the only way to communicate with DAMON for contexts running on its worker thread.  The interface is flexible and simple.  But as DAMON evolves with more features, damon_callback has become somewhat too old.  With runtime parameters update, for example, its lack of synchronization support was found to be inconvenient.  Arguably it is also not easy to use correctly, since callers have to understand when each callback is invoked and what the return values from the callbacks imply.

To replace it, damon_call() and damos_walk() are introduced, and those have replaced a few damon_callback use cases.  Some use cases of damon_callback, such as parallel or repetitive DAMON internal data reading and additional cleanups, cannot simply be replaced by damon_call() and damos_walk(), though.

To allow those to be replaced as well, extend damon_call() for parallel and/or repeated callbacks and modify the core/ops layers for additional resource cleanup.  With the updates, replace the remaining damon_callback usages and finally say goodbye to damon_callback.

This patch (of 14):

Calling damon_call() while it is already serving another thread immediately fails with -EBUSY, and the caller has to retry later.  Having every caller implement such retry logic would be redundant.  Accept parallel damon_call() requests and handle the waiting internally, instead of making each caller retry.

Link: https://lkml.kernel.org/r/20250712195016.151108-1-sj@kernel.org
Link: https://lkml.kernel.org/r/20250712195016.151108-2-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Committed by: Andrew Morton
Parent: e001ef9652
Commit: 004ded6bee
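For context on the caller-visible change: with this patch a second thread issuing damon_call() no longer gets -EBUSY and no longer needs its own retry loop; it queues its request and blocks until the kdamond has run it.  A minimal caller-side sketch, assuming only what the diff below shows; the stat-reading function and wrapper are hypothetical, not part of the patch:

#include <linux/damon.h>

/* Hypothetical function to run on the kdamond worker thread. */
static int read_internal_stat(void *data)
{
	unsigned long *stat = data;

	*stat = 0;	/* read DAMON-internal state here */
	return 0;
}

/*
 * Queue the request and block until the kdamond serves it.  With this
 * patch, concurrent callers each wait for their own turn instead of one
 * of them failing with -EBUSY.
 */
static int read_stat_sync(struct damon_ctx *ctx, unsigned long *stat)
{
	struct damon_call_control control = {
		.fn = read_internal_stat,
		.data = stat,
	};

	return damon_call(ctx, &control);
}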
include/linux/damon.h
@@ -673,6 +673,8 @@ struct damon_call_control {
 	struct completion completion;
 	/* informs if the kdamond canceled @fn invocation */
 	bool canceled;
+	/* List head for siblings. */
+	struct list_head list;
 };
 
 /**
@@ -798,8 +800,9 @@ struct damon_ctx {
 	/* for scheme quotas prioritization */
 	unsigned long *regions_score_histogram;
 
-	struct damon_call_control *call_control;
-	struct mutex call_control_lock;
+	/* lists of &struct damon_call_control */
+	struct list_head call_controls;
+	struct mutex call_controls_lock;
 
 	struct damos_walk_control *walk_control;
 	struct mutex walk_control_lock;
mm/damon/core.c
@@ -533,7 +533,8 @@ struct damon_ctx *damon_new_ctx(void)
 	ctx->next_ops_update_sis = 0;
 
 	mutex_init(&ctx->kdamond_lock);
-	mutex_init(&ctx->call_control_lock);
+	INIT_LIST_HEAD(&ctx->call_controls);
+	mutex_init(&ctx->call_controls_lock);
 	mutex_init(&ctx->walk_control_lock);
 
 	ctx->attrs.min_nr_regions = 10;
@@ -1393,14 +1394,11 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
 {
 	init_completion(&control->completion);
 	control->canceled = false;
+	INIT_LIST_HEAD(&control->list);
 
-	mutex_lock(&ctx->call_control_lock);
-	if (ctx->call_control) {
-		mutex_unlock(&ctx->call_control_lock);
-		return -EBUSY;
-	}
-	ctx->call_control = control;
-	mutex_unlock(&ctx->call_control_lock);
+	mutex_lock(&ctx->call_controls_lock);
+	list_add_tail(&ctx->call_controls, &control->list);
+	mutex_unlock(&ctx->call_controls_lock);
 	if (!damon_is_running(ctx))
 		return -EINVAL;
 	wait_for_completion(&control->completion);
@@ -2419,11 +2417,11 @@ static void kdamond_usleep(unsigned long usecs)
 }
 
 /*
- * kdamond_call() - handle damon_call_control.
+ * kdamond_call() - handle damon_call_control objects.
  * @ctx:	The &struct damon_ctx of the kdamond.
  * @cancel:	Whether to cancel the invocation of the function.
  *
- * If there is a &struct damon_call_control request that registered via
+ * If there are &struct damon_call_control requests that registered via
  * &damon_call() on @ctx, do or cancel the invocation of the function depending
  * on @cancel.  @cancel is set when the kdamond is already out of the main loop
  * and therefore will be terminated.
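The @cancel parameter corresponds to the two points at which the worker thread is expected to call this.  A simplified sketch of that calling pattern, as if written inside mm/damon/core.c; this is illustrative only, not the actual kdamond_fn, and the exit check is a stand-in for the real ones:

/*
 * Illustrative sketch of the calling pattern the comment above describes:
 * serve queued requests each iteration, and cancel whatever is still
 * queued once the main loop has been left.
 */
static int kdamond_worker_sketch(void *data)
{
	struct damon_ctx *ctx = data;

	while (!kthread_should_stop()) {	/* stand-in for the real exit checks */
		/* ... one round of access monitoring ... */
		kdamond_call(ctx, false);	/* invoke queued requests */
	}
	/* out of the main loop: remaining requests can only be canceled */
	kdamond_call(ctx, true);
	return 0;
}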
@@ -2433,21 +2431,24 @@ static void kdamond_call(struct damon_ctx *ctx, bool cancel)
 	struct damon_call_control *control;
 	int ret = 0;
 
-	mutex_lock(&ctx->call_control_lock);
-	control = ctx->call_control;
-	mutex_unlock(&ctx->call_control_lock);
-	if (!control)
-		return;
-	if (cancel) {
-		control->canceled = true;
-	} else {
-		ret = control->fn(control->data);
-		control->return_code = ret;
+	while (true) {
+		mutex_lock(&ctx->call_controls_lock);
+		control = list_first_entry_or_null(&ctx->call_controls,
+				struct damon_call_control, list);
+		mutex_unlock(&ctx->call_controls_lock);
+		if (!control)
+			return;
+		if (cancel) {
+			control->canceled = true;
+		} else {
+			ret = control->fn(control->data);
+			control->return_code = ret;
+		}
+		mutex_lock(&ctx->call_controls_lock);
+		list_del(&control->list);
+		mutex_unlock(&ctx->call_controls_lock);
+		complete(&control->completion);
 	}
-	complete(&control->completion);
-	mutex_lock(&ctx->call_control_lock);
-	ctx->call_control = NULL;
-	mutex_unlock(&ctx->call_control_lock);
 }
 
 /* Returns negative error code if it's not activated but should return */
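For readers less familiar with the kernel's list and completion primitives, the same submit-queue/drain pattern can be written as a stand-alone user-space sketch.  Every identifier below is invented for illustration; nothing here is DAMON or kernel API:

/*
 * Stand-alone user-space analog of the submit-queue/drain pattern above.
 * Callers enqueue a request and block until the single worker has served it.
 */
#include <pthread.h>
#include <stdbool.h>

struct call_request {
	int (*fn)(void *data);
	void *data;
	int return_code;
	bool canceled;
	bool done;			/* plays the role of struct completion */
	pthread_cond_t done_cond;
	struct call_request *next;	/* plays the role of the list_head */
};

static struct call_request *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analog of damon_call(): append to the queue, then wait for completion. */
static int submit_call(struct call_request *req)
{
	struct call_request **tail;

	req->canceled = false;
	req->done = false;
	req->next = NULL;
	pthread_cond_init(&req->done_cond, NULL);

	pthread_mutex_lock(&queue_lock);
	for (tail = &queue_head; *tail; tail = &(*tail)->next)
		;
	*tail = req;
	while (!req->done)
		pthread_cond_wait(&req->done_cond, &queue_lock);
	pthread_mutex_unlock(&queue_lock);
	pthread_cond_destroy(&req->done_cond);
	/* like the kernel code, the caller may also check req->canceled */
	return req->return_code;
}

/*
 * Analog of kdamond_call(): drain the queue, running or canceling each
 * request.  (The kernel code drops the lock while running ->fn; it is held
 * across the call here only to keep the sketch short.)
 */
static void drain_calls(bool cancel)
{
	pthread_mutex_lock(&queue_lock);
	while (queue_head) {
		struct call_request *req = queue_head;

		queue_head = req->next;
		if (cancel)
			req->canceled = true;
		else
			req->return_code = req->fn(req->data);
		req->done = true;
		pthread_cond_signal(&req->done_cond);
	}
	pthread_mutex_unlock(&queue_lock);
}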