--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -392,6 +392,16 @@ struct sched_entity {
#endif
};
+/* cfs_rq "owned" by this sched_entity */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *se)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ return se->my_q;
+#else
+ return NULL;
+#endif
+}
+
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -292,12 +292,6 @@ static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
return se->cfs_rq;
}
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
- return grp->my_q;
-}
-
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (!cfs_rq->on_list) {
@@ -449,12 +443,6 @@ static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
return &rq->cfs;
}
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
- return NULL;
-}
-
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}
Export struct cfs_rq *group_cfs_rq(struct sched_entity *se) to be able
to distinguish sched_entities representing either tasks or task_groups
in the sched_entity related load tracking trace event provided by the
next patch.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
---
 include/linux/sched.h | 10 ++++++++++
 kernel/sched/fair.c   | 12 ------------
 2 files changed, 10 insertions(+), 12 deletions(-)

-- 
2.11.0
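
[Illustration only, not part of the patch: a minimal sketch of how a caller
such as the trace event in the next patch might use the exported helper to
tell task entities from group entities. The helper name se_is_task_sketch()
is made up for this example.]

#include <linux/sched.h>

/*
 * A group sched_entity owns a cfs_rq (se->my_q), while a task
 * sched_entity does not, so a NULL return from group_cfs_rq()
 * identifies a task. With !CONFIG_FAIR_GROUP_SCHED every
 * sched_entity represents a task and group_cfs_rq() is always NULL.
 */
static inline bool se_is_task_sketch(struct sched_entity *se)
{
	return group_cfs_rq(se) == NULL;
}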