psi: rename psi fields in preparation for psi trigger addition

Rename the psi_group structure member fields used for calculating psi
totals and averages, to clearly distinguish them from the trigger-related
fields that will be added by "psi: introduce psi monitor".

[surenb@google.com: v6]
  Link: http://lkml.kernel.org/r/20190319235619.260832-4-surenb@google.com
Link: http://lkml.kernel.org/r/20190124211518.244221-5-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit bcc78db641 (parent 9289c5e6a7)
Author: Suren Baghdasaryan, 2019-05-14 15:41:02 -07:00
Committer: Linus Torvalds
2 changed files with 28 additions and 27 deletions

include/linux/psi_types.h

@@ -69,17 +69,17 @@ struct psi_group_cpu {
 };
 
 struct psi_group {
-	/* Protects data updated during an aggregation */
-	struct mutex stat_lock;
+	/* Protects data used by the aggregator */
+	struct mutex avgs_lock;
 
 	/* Per-cpu task state & time tracking */
 	struct psi_group_cpu __percpu *pcpu;
 
-	/* Periodic aggregation state */
-	u64 total_prev[NR_PSI_STATES - 1];
-	u64 last_update;
-	u64 next_update;
-	struct delayed_work clock_work;
+	/* Running pressure averages */
+	u64 avg_total[NR_PSI_STATES - 1];
+	u64 avg_last_update;
+	u64 avg_next_update;
+	struct delayed_work avgs_work;
 
 	/* Total stall times and sampled pressure averages */
 	u64 total[NR_PSI_STATES - 1];
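For reference, the old-to-new mapping from the hunk above, condensed into a minimal compilable sketch. This is plain userspace C, not the kernel header, and the NR_PSI_STATES value is an assumption matching enum psi_states at the time of this commit:

#include <stdint.h>

#define NR_PSI_STATES 6 /* assumed kernel enum value at this commit */

/* Averaging state of struct psi_group after the rename, old names noted */
struct psi_group_avgs_sketch {
	uint64_t avg_total[NR_PSI_STATES - 1];	/* was: total_prev */
	uint64_t avg_last_update;		/* was: last_update */
	uint64_t avg_next_update;		/* was: next_update */
};

/* Also renamed above: stat_lock -> avgs_lock, clock_work -> avgs_work;
 * the matching worker psi_update_work() becomes psi_avgs_work() below. */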

kernel/sched/psi.c

@@ -165,7 +165,7 @@ static struct psi_group psi_system = {
 	.pcpu = &system_group_pcpu,
 };
 
-static void psi_update_work(struct work_struct *work);
+static void psi_avgs_work(struct work_struct *work);
 
 static void group_init(struct psi_group *group)
 {
@@ -173,9 +173,9 @@ static void group_init(struct psi_group *group)
 
 	for_each_possible_cpu(cpu)
 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
-	group->next_update = sched_clock() + psi_period;
-	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
-	mutex_init(&group->stat_lock);
+	group->avg_next_update = sched_clock() + psi_period;
+	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+	mutex_init(&group->avgs_lock);
 }
 
 void __init psi_init(void)
@@ -278,7 +278,7 @@ static bool update_stats(struct psi_group *group)
 	int cpu;
 	int s;
 
-	mutex_lock(&group->stat_lock);
+	mutex_lock(&group->avgs_lock);
 
 	/*
 	 * Collect the per-cpu time buckets and average them into a
@@ -319,7 +319,7 @@ static bool update_stats(struct psi_group *group)
 	/* avgX= */
 	now = sched_clock();
-	expires = group->next_update;
+	expires = group->avg_next_update;
 	if (now < expires)
 		goto out;
 
 	if (now - expires >= psi_period)
@@ -332,14 +332,14 @@ static bool update_stats(struct psi_group *group)
 	 * But the deltas we sample out of the per-cpu buckets above
 	 * are based on the actual time elapsing between clock ticks.
 	 */
-	group->next_update = expires + ((1 + missed_periods) * psi_period);
-	period = now - (group->last_update + (missed_periods * psi_period));
-	group->last_update = now;
+	group->avg_next_update = expires + ((1 + missed_periods) * psi_period);
+	period = now - (group->avg_last_update + (missed_periods * psi_period));
+	group->avg_last_update = now;
 
 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
 		u32 sample;
 
-		sample = group->total[s] - group->total_prev[s];
+		sample = group->total[s] - group->avg_total[s];
 		/*
 		 * Due to the lockless sampling of the time buckets,
 		 * recorded time deltas can slip into the next period,
@@ -359,22 +359,22 @@ static bool update_stats(struct psi_group *group)
 		 */
 		if (sample > period)
 			sample = period;
-		group->total_prev[s] += sample;
+		group->avg_total[s] += sample;
 		calc_avgs(group->avg[s], missed_periods, sample, period);
 	}
 
 out:
-	mutex_unlock(&group->stat_lock);
+	mutex_unlock(&group->avgs_lock);
 	return nonidle_total;
 }
 
-static void psi_update_work(struct work_struct *work)
+static void psi_avgs_work(struct work_struct *work)
 {
 	struct delayed_work *dwork;
 	struct psi_group *group;
 	bool nonidle;
 
 	dwork = to_delayed_work(work);
-	group = container_of(dwork, struct psi_group, clock_work);
+	group = container_of(dwork, struct psi_group, avgs_work);
 
 	/*
 	 * If there is task activity, periodically fold the per-cpu
@@ -391,8 +391,9 @@ static void psi_update_work(struct work_struct *work)
 		u64 now;
 
 		now = sched_clock();
-		if (group->next_update > now)
-			delay = nsecs_to_jiffies(group->next_update - now) + 1;
+		if (group->avg_next_update > now)
+			delay = nsecs_to_jiffies(
+					group->avg_next_update - now) + 1;
 		schedule_delayed_work(dwork, delay);
 	}
 }
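The "+ 1" added to the converted delay rounds up, so the rescheduled worker cannot fire before avg_next_update. A minimal userspace sketch of that arithmetic; HZ, the timestamps, and the simplified nsecs_to_jiffies() below are illustrative stand-ins for the kernel versions:

#include <stdint.h>
#include <stdio.h>

#define HZ 100				/* illustrative CONFIG_HZ value */
#define NSEC_PER_SEC 1000000000ULL

/* Simplified stand-in for the kernel's nsecs_to_jiffies() */
static uint64_t nsecs_to_jiffies(uint64_t ns)
{
	return ns / (NSEC_PER_SEC / HZ);
}

int main(void)
{
	uint64_t now = 5000000000ULL;		  /* "sched_clock()": 5s */
	uint64_t avg_next_update = 6500000000ULL; /* period ends at 6.5s */
	uint64_t delay = 0;

	/* Mirrors the rescheduling in psi_avgs_work() above: sleep until
	 * the next period boundary, rounded up by one jiffy. */
	if (avg_next_update > now)
		delay = nsecs_to_jiffies(avg_next_update - now) + 1;

	printf("reschedule after %llu jiffies\n",
	       (unsigned long long)delay);	/* prints 151 at HZ=100 */
	return 0;
}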
@@ -546,13 +547,13 @@ void psi_task_change(struct task_struct *task, int clear, int set)
 	 */
 	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
-		     wq_worker_last_func(task) == psi_update_work))
+		     wq_worker_last_func(task) == psi_avgs_work))
 		wake_clock = false;
 
 	while ((group = iterate_groups(task, &iter))) {
 		psi_group_change(group, cpu, clear, set);
-		if (wake_clock && !delayed_work_pending(&group->clock_work))
-			schedule_delayed_work(&group->clock_work, PSI_FREQ);
+		if (wake_clock && !delayed_work_pending(&group->avgs_work))
+			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
 	}
 }
@@ -649,7 +650,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
 	if (static_branch_likely(&psi_disabled))
 		return;
 
-	cancel_delayed_work_sync(&cgroup->psi.clock_work);
+	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
 	free_percpu(cgroup->psi.pcpu);
 }
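To make the renamed bookkeeping concrete, here is a standalone sketch of the missed-period window arithmetic that update_stats() performs with the avg_* fields; the 2s psi_period and the timestamps are illustrative, and the kernel uses div_u64() instead of plain division:

#include <stdint.h>
#include <stdio.h>

#define PSI_PERIOD_NS 2000000000ULL	/* illustrative 2s psi_period */

int main(void)
{
	/* The worker was due at 10s (avg_next_update) but runs at 14.5s,
	 * so it has missed two full periods. */
	uint64_t expires = 10000000000ULL;
	uint64_t avg_last_update = 8000000000ULL;
	uint64_t now = 14500000000ULL;
	uint64_t missed_periods = 0;

	if (now - expires >= PSI_PERIOD_NS)
		missed_periods = (now - expires) / PSI_PERIOD_NS;

	/* Same bookkeeping as update_stats() after the rename: keep the
	 * schedule on fixed psi_period boundaries, but measure the
	 * sampling window from the time that actually elapsed. */
	uint64_t avg_next_update = expires + (1 + missed_periods) * PSI_PERIOD_NS;
	uint64_t period = now - (avg_last_update + missed_periods * PSI_PERIOD_NS);
	avg_last_update = now;	/* becomes the base of the next window */

	printf("missed_periods=%llu next_update=%llus period=%llums\n",
	       (unsigned long long)missed_periods,
	       (unsigned long long)(avg_next_update / 1000000000ULL),
	       (unsigned long long)(period / 1000000ULL));
	/* missed_periods=2 next_update=16s period=2500ms */
	return 0;
}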