@@ -2449,7 +2449,9 @@ static int dl_overflow(struct task_struct *p, int policy,
} else if (dl_policy(policy) && task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
__dl_clear(dl_b, p->dl.dl_bw);
+ __dl_sub_ac(task_rq(p), p->dl.dl_bw);
__dl_add(dl_b, new_bw);
+ __dl_add_ac(task_rq(p), new_bw);
err = 0;
} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
__dl_clear(dl_b, p->dl.dl_bw);
@@ -83,6 +83,7 @@ void init_dl_rq(struct dl_rq *dl_rq)
#else
init_dl_bw(&dl_rq->dl_bw);
#endif
+ dl_rq->ac_bw = 0;
}
#ifdef CONFIG_SMP
@@ -278,8 +279,10 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
* By now the task is replenished and enqueued; migrate it.
*/
deactivate_task(rq, p, 0);
+ __dl_sub_ac(rq, p->dl.dl_bw);
set_task_cpu(p, later_rq->cpu);
activate_task(later_rq, p, 0);
+ __dl_add_ac(later_rq, p->dl.dl_bw);
if (!fallback)
resched_curr(later_rq);
@@ -506,6 +509,7 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
*/
if (dl_se->dl_new) {
setup_new_dl_entity(dl_se, pi_se);
+ __dl_add_ac(rq, dl_se->dl_bw);
return;
}
@@ -955,6 +959,9 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
return;
}
+ if (p->on_rq == TASK_ON_RQ_MIGRATING)
+ __dl_add_ac(rq, p->dl.dl_bw);
+
/*
* If p is throttled, we do nothing. In fact, if it exhausted
* its budget it needs a replenishment and, since it now is on
@@ -980,6 +987,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
__dequeue_task_dl(rq, p, flags);
+ if (p->on_rq == TASK_ON_RQ_MIGRATING)
+ __dl_sub_ac(rq, p->dl.dl_bw);
}
/*
@@ -1219,6 +1228,8 @@ static void task_dead_dl(struct task_struct *p)
{
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+ __dl_sub_ac(task_rq(p), p->dl.dl_bw);
+
/*
* Since we are TASK_DEAD we won't slip out of the domain!
*/
@@ -1511,8 +1522,10 @@ retry:
}
deactivate_task(rq, next_task, 0);
+ __dl_sub_ac(rq, next_task->dl.dl_bw);
set_task_cpu(next_task, later_rq->cpu);
activate_task(later_rq, next_task, 0);
+ __dl_add_ac(later_rq, next_task->dl.dl_bw);
ret = 1;
resched_curr(later_rq);
@@ -1599,8 +1612,10 @@ static void pull_dl_task(struct rq *this_rq)
resched = true;
deactivate_task(src_rq, p, 0);
+ __dl_sub_ac(src_rq, p->dl.dl_bw);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
+ __dl_add_ac(this_rq, p->dl.dl_bw);
dmin = p->dl.deadline;
/* Is there any other task even earlier? */
@@ -1705,6 +1720,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
if (!start_dl_timer(p))
__dl_clear_params(p);
+ if (dl_prio(p->normal_prio))
+ __dl_sub_ac(rq, p->dl.dl_bw);
+
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
@@ -519,6 +519,14 @@ struct dl_rq {
#else
struct dl_bw dl_bw;
#endif
+
+ /*
+	 * ac_bw keeps track of the per-rq admitted bandwidth. It only changes
+	 * when a new task is admitted, when it dies, when it changes scheduling
+	 * policy, or when it is migrated to another rq. It is used to correctly
+	 * save/restore total_bw on root_domain changes.
+ */
+ u64 ac_bw;
};
#ifdef CONFIG_SMP
@@ -720,6 +728,20 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)
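+/*
+ * Helpers to update the per-rq admitted bandwidth (dl_rq::ac_bw) when a
+ * -deadline task's bandwidth starts or stops being accounted on this rq.
+ * __dl_sub_ac() warns if the rq has no admitted bandwidth left to remove.
+ */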
+static inline
+void __dl_sub_ac(struct rq *rq, u64 tsk_bw)
+{
+ WARN_ON(rq->dl.ac_bw == 0);
+
+ rq->dl.ac_bw -= tsk_bw;
+}
+
+static inline
+void __dl_add_ac(struct rq *rq, u64 tsk_bw)
+{
+ rq->dl.ac_bw += tsk_bw;
+}
+
static inline u64 __rq_clock_broken(struct rq *rq)
{
return READ_ONCE(rq->clock);
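
Note: the hunks above only add the per-rq accounting itself; the save/restore of total_bw on root_domain changes that the struct comment refers to is not part of this excerpt. As a rough illustration of the idea, here is a minimal userspace sketch (plain C, not kernel code; struct toy_rq, struct toy_root_domain and rebuild_total_bw() are made-up names for illustration) of how a root domain's total bandwidth could be rebuilt by summing the per-rq admitted bandwidth of its member CPUs instead of being lost on a domain rebuild.

#include <stdio.h>
#include <stdint.h>

/* Toy model: each rq tracks its admitted -deadline bandwidth (ac_bw). */
struct toy_rq {
	uint64_t ac_bw;
};

/* Toy root domain: total_bw aggregates the bandwidth of its member CPUs. */
struct toy_root_domain {
	uint64_t total_bw;
};

/*
 * On a root-domain change, total_bw can be rebuilt by summing the per-rq
 * admitted bandwidth of every CPU that ends up in the new domain, so the
 * bandwidth of already-admitted tasks is not forgotten.
 */
static void rebuild_total_bw(struct toy_root_domain *rd,
			     const struct toy_rq *rqs, int ncpus)
{
	rd->total_bw = 0;
	for (int i = 0; i < ncpus; i++)
		rd->total_bw += rqs[i].ac_bw;
}

int main(void)
{
	/* Two CPUs with some previously admitted bandwidth (arbitrary values). */
	struct toy_rq rqs[2] = { { .ac_bw = 3 << 10 }, { .ac_bw = 1 << 10 } };
	struct toy_root_domain rd;

	rebuild_total_bw(&rd, rqs, 2);
	printf("total_bw = %llu\n", (unsigned long long)rd.total_bw);
	return 0;
}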