| Message ID | 20241213095407.271357-3-gmonaco@redhat.com |
|---|---|
| State | New |
| Series | sched: Move task_mm_cid_work to mm delayed work |
On 2024-12-13 04:54, Gabriele Monaco wrote:
> The checks for the scan time in task_mm_cid_work are now superfluous
> since the task runs in a delayed_work and the minimum periodicity is
> already implied.
>
> This patch removes those checks and the field from the mm_struct.
>
> Additionally, we include a simple check to quickly terminate the
> function if we have no work to be done (i.e. no mm_cid is allocated).
> This is helpful for tasks that sleep for a long time, but also for
> terminated task. We are no longer following the process' state, hence
> the function continues to run after a process terminates but before its
> mm is freed.

Can you fold it in patch 1/4 ?

Thanks,

Mathieu

>
> Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
> ---
>  include/linux/mm_types.h |  7 -------
>  kernel/sched/core.c      | 19 +++----------------
>  2 files changed, 3 insertions(+), 23 deletions(-)
>
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 92acb827fee4..8a76a1c09234 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -829,12 +829,6 @@ struct mm_struct {
>  	 * runqueue locks.
>  	 */
>  	struct mm_cid __percpu *pcpu_cid;
> -	/*
> -	 * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
> -	 *
> -	 * When the next mm_cid scan is due (in jiffies).
> -	 */
> -	unsigned long mm_cid_next_scan;
>  	/**
>  	 * @nr_cpus_allowed: Number of CPUs allowed for mm.
>  	 *
> @@ -1228,7 +1222,6 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
>  		return -ENOMEM;
>  	mm_init_cid(mm, p);
>  	INIT_DELAYED_WORK(&mm->mm_cid_work, task_mm_cid_work);
> -	mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
>  	schedule_delayed_work(&mm->mm_cid_work,
>  			      msecs_to_jiffies(MM_CID_SCAN_DELAY));
>  	return 0;
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index e3b27b73301c..30d78fe14eff 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -10520,28 +10520,15 @@ static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
>
>  void task_mm_cid_work(struct work_struct *work)
>  {
> -	unsigned long now = jiffies, old_scan, next_scan;
>  	struct cpumask *cidmask;
>  	struct delayed_work *delayed_work = container_of(work, struct delayed_work, work);
>  	struct mm_struct *mm = container_of(delayed_work, struct mm_struct, mm_cid_work);
>  	int weight, cpu;
>
> -	old_scan = READ_ONCE(mm->mm_cid_next_scan);
> -	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
> -	if (!old_scan) {
> -		unsigned long res;
> -
> -		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
> -		if (res != old_scan)
> -			old_scan = res;
> -		else
> -			old_scan = next_scan;
> -	}
> -	if (time_before(now, old_scan))
> -		goto out;
> -	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
> -		goto out;
>  	cidmask = mm_cidmask(mm);
> +	/* Nothing to clear for now */
> +	if (cpumask_empty(cidmask))
> +		goto out;
>  	/* Clear cids that were not recently used. */
>  	for_each_possible_cpu(cpu)
>  		sched_mm_cid_remote_clear_old(mm, cpu);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 92acb827fee4..8a76a1c09234 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -829,12 +829,6 @@ struct mm_struct {
 	 * runqueue locks.
 	 */
 	struct mm_cid __percpu *pcpu_cid;
-	/*
-	 * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
-	 *
-	 * When the next mm_cid scan is due (in jiffies).
-	 */
-	unsigned long mm_cid_next_scan;
 	/**
 	 * @nr_cpus_allowed: Number of CPUs allowed for mm.
 	 *
@@ -1228,7 +1222,6 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
 		return -ENOMEM;
 	mm_init_cid(mm, p);
 	INIT_DELAYED_WORK(&mm->mm_cid_work, task_mm_cid_work);
-	mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
 	schedule_delayed_work(&mm->mm_cid_work,
 			      msecs_to_jiffies(MM_CID_SCAN_DELAY));
 	return 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3b27b73301c..30d78fe14eff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10520,28 +10520,15 @@ static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
 
 void task_mm_cid_work(struct work_struct *work)
 {
-	unsigned long now = jiffies, old_scan, next_scan;
 	struct cpumask *cidmask;
 	struct delayed_work *delayed_work = container_of(work, struct delayed_work, work);
 	struct mm_struct *mm = container_of(delayed_work, struct mm_struct, mm_cid_work);
 	int weight, cpu;
 
-	old_scan = READ_ONCE(mm->mm_cid_next_scan);
-	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
-	if (!old_scan) {
-		unsigned long res;
-
-		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
-		if (res != old_scan)
-			old_scan = res;
-		else
-			old_scan = next_scan;
-	}
-	if (time_before(now, old_scan))
-		goto out;
-	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
-		goto out;
 	cidmask = mm_cidmask(mm);
+	/* Nothing to clear for now */
+	if (cpumask_empty(cidmask))
+		goto out;
 	/* Clear cids that were not recently used. */
 	for_each_possible_cpu(cpu)
 		sched_mm_cid_remote_clear_old(mm, cpu);
The checks for the scan time in task_mm_cid_work are now superfluous
since the task runs in a delayed_work and the minimum periodicity is
already implied.

This patch removes those checks and the field from the mm_struct.

Additionally, we include a simple check to quickly terminate the
function if we have no work to do (i.e. no mm_cid is allocated). This is
helpful for tasks that sleep for a long time, but also for terminated
tasks: we no longer follow the process' state, hence the function
continues to run after a process terminates but before its mm is freed.

Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
 include/linux/mm_types.h |  7 -------
 kernel/sched/core.c      | 19 +++----------------
 2 files changed, 3 insertions(+), 23 deletions(-)
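[Editor's note: the sketch below is not part of the patch. It is a minimal,
self-contained illustration of the delayed_work pattern the series relies on:
a work item armed with a fixed delay already provides the minimum periodicity,
and an empty-cpumask check lets the handler bail out early, which is what makes
the jiffies-based mm_cid_next_scan throttling redundant. The names scan_work,
scan_fn, scan_mask and SCAN_DELAY_MS are made up for illustration; where and
how the real series re-arms the mm_cid work is handled elsewhere in the series.]

/* Illustrative sketch only; hypothetical names, not the patch's code. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>

#define SCAN_DELAY_MS	100		/* stands in for MM_CID_SCAN_DELAY */

static struct delayed_work scan_work;
static struct cpumask scan_mask;	/* stands in for the mm_cid mask */

static void scan_fn(struct work_struct *work)
{
	/* Quick exit when there is nothing to scan, as in the patch. */
	if (cpumask_empty(&scan_mask))
		goto out;

	/* ... per-CPU clearing would happen here ... */
out:
	/*
	 * Re-arming with a fixed delay gives the periodicity; no extra
	 * "is it time yet?" bookkeeping is needed in the handler.
	 */
	schedule_delayed_work(&scan_work, msecs_to_jiffies(SCAN_DELAY_MS));
}

static int __init scan_init(void)
{
	INIT_DELAYED_WORK(&scan_work, scan_fn);
	schedule_delayed_work(&scan_work, msecs_to_jiffies(SCAN_DELAY_MS));
	return 0;
}

static void __exit scan_exit(void)
{
	cancel_delayed_work_sync(&scan_work);
}

module_init(scan_init);
module_exit(scan_exit);
MODULE_LICENSE("GPL");

The cpumask_empty() test is a single scan of the mask with no atomics, so the
early exit costs essentially nothing while saving the per-CPU walk for mms that
currently have no cid allocated (long sleepers, or exited tasks whose mm has
not been freed yet).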