@@ -3437,17 +3437,11 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
 	return 0;
 }
 
-struct sched_in_data {
-	struct perf_event_context *ctx;
-	struct perf_cpu_context *cpuctx;
-	int can_add_hw;
-};
-
 static int merge_sched_in(struct perf_event *event, void *data)
 {
-	struct sched_in_data *sid = data;
-
-	WARN_ON_ONCE(event->ctx != sid->ctx);
+	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	int *can_add_hw = data;
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
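
The hunk above leans on the fact that visit_groups_merge() hands its callback a single opaque void *data cookie: whatever state the callback needs must either travel through that cookie or be recomputed from the event itself. Since ctx is reachable as event->ctx and cpuctx is derivable from ctx via __get_cpu_context(), only the can_add_hw flag still has to ride in the cookie, and a bare int * replaces the three-field carrier struct. A minimal, self-contained sketch of the same cookie pattern, using stand-in names rather than kernel APIs:

#include <stdio.h>

/* Stand-in for the kernel's event; only the field the sketch needs. */
struct event { int state; };

/*
 * Stand-in for visit_groups_merge(): walks the events and forwards
 * the opaque cookie to the per-event callback, stopping on error.
 */
static int visit_all(struct event *evs, int n,
		     int (*func)(struct event *, void *), void *data)
{
	for (int i = 0; i < n; i++) {
		int ret = func(&evs[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Stand-in for merge_sched_in(): recovers the flag straight from the
 * void * cookie; no carrier struct is needed when only one value
 * must travel.
 */
static int sched_one(struct event *ev, void *data)
{
	int *can_add_hw = data;

	if (*can_add_hw && ev->state < 0)
		*can_add_hw = 0;	/* a "failed" event trips the flag */
	return 0;
}

int main(void)
{
	struct event evs[] = { { 1 }, { -1 }, { 2 } };
	int can_add_hw = 1;

	visit_all(evs, 3, sched_one, &can_add_hw);
	printf("can_add_hw = %d\n", can_add_hw);	/* prints 0 */
	return 0;
}

The caller owns the flag on its stack and the callback mutates it through the pointer, so the state persists across every invocation within one walk without any heap or global storage.
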
@@ -3455,8 +3449,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
 	if (!event_filter_match(event))
 		return 0;
 
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+	if (group_can_go_on(event, cpuctx, *can_add_hw)) {
+		if (!group_sched_in(event, cpuctx, ctx))
 			list_add_tail(&event->active_list, get_event_list(event));
 	}
 
@@ -3466,8 +3460,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
 			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
 		}
 
-		sid->can_add_hw = 0;
-		sid->ctx->rotate_necessary = 1;
+		*can_add_hw = 0;
+		ctx->rotate_necessary = 1;
 	}
 
 	return 0;
@@ -3477,30 +3471,22 @@ static void
 ctx_pinned_sched_in(struct perf_event_context *ctx,
 		    struct perf_cpu_context *cpuctx)
 {
-	struct sched_in_data sid = {
-		.ctx = ctx,
-		.cpuctx = cpuctx,
-		.can_add_hw = 1,
-	};
+	int can_add_hw = 1;
 
 	visit_groups_merge(&ctx->pinned_groups,
 			   smp_processor_id(),
-			   merge_sched_in, &sid);
+			   merge_sched_in, &can_add_hw);
 }
 
 static void
 ctx_flexible_sched_in(struct perf_event_context *ctx,
 		      struct perf_cpu_context *cpuctx)
 {
-	struct sched_in_data sid = {
-		.ctx = ctx,
-		.cpuctx = cpuctx,
-		.can_add_hw = 1,
-	};
+	int can_add_hw = 1;
 
 	visit_groups_merge(&ctx->flexible_groups,
 			   smp_processor_id(),
-			   merge_sched_in, &sid);
+			   merge_sched_in, &can_add_hw);
 }
 
 static void
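
One behavioral detail worth spelling out (my reading of the patch, not something it states): because each caller now declares the flag fresh on its own stack, the pinned and flexible passes each start with can_add_hw = 1, exactly as the old per-call sid initializer did, while within a single pass the flag is shared by every callback invocation, so one group that cannot go on suppresses further hardware scheduling for the rest of that walk. A toy illustration of the reset-per-pass behavior, again with stand-in names:

#include <stdio.h>

/* Toy walk: trip the shared flag on the first "unschedulable" item. */
static void walk(const char *name, const int *items, int n, int *can_add_hw)
{
	for (int i = 0; i < n; i++)
		if (*can_add_hw && items[i] < 0)
			*can_add_hw = 0;
	printf("%s pass: can_add_hw ends at %d\n", name, *can_add_hw);
}

int main(void)
{
	int pinned[]   = { 1, -1 };	/* trips the flag mid-pass */
	int flexible[] = { 2, 3 };

	{	/* like ctx_pinned_sched_in() */
		int can_add_hw = 1;
		walk("pinned", pinned, 2, &can_add_hw);	/* ends at 0 */
	}
	{	/* like ctx_flexible_sched_in(): fresh stack flag, fresh pass */
		int can_add_hw = 1;
		walk("flexible", flexible, 2, &can_add_hw);	/* ends at 1 */
	}
	return 0;
}
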