@@ -568,6 +568,12 @@ static bool intel_cqm_sched_in_event(u32 rmid)
leader = list_first_entry(&cache_groups, struct perf_event,
hw.cqm_groups_entry);
+
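+	/*
+	 * list_for_each_entry_continue() below starts with the entry
+	 * *after* @leader, so offer the free RMID to the leader itself
+	 * before walking the rest of the list.
+	 */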
+ if (!list_empty(&cache_groups) && !__rmid_valid(leader->hw.cqm_rmid)) {
+ intel_cqm_xchg_rmid(leader, rmid);
+ return true;
+ }
+
event = leader;
list_for_each_entry_continue(event, &cache_groups,
@@ -736,6 +742,7 @@ static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
{
struct perf_event *group, *g;
u32 rmid;
+ LIST_HEAD(conflicting_groups);
lockdep_assert_held(&cache_mutex);
@@ -759,6 +766,7 @@ static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
intel_cqm_xchg_rmid(group, INVALID_RMID);
__put_rmid(rmid);
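+		/*
+		 * Park the descheduled group on a local list so it can be
+		 * spliced back onto the tail of cache_groups afterwards.
+		 */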
+ list_move_tail(&group->hw.cqm_groups_entry, &conflicting_groups);
	}
+
+	/*
+	 * Put the conflicting groups back on the tail of cache_groups so
+	 * they are rescheduled last; without this the groups moved onto
+	 * the local conflicting_groups list above would be lost when we
+	 * return.
+	 */
+	list_splice_tail(&conflicting_groups, &cache_groups);
}
@@ -788,9 +796,9 @@ static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
*/
static bool __intel_cqm_rmid_rotate(void)
{
- struct perf_event *group, *start = NULL;
+ struct perf_event *group, *start;
unsigned int threshold_limit;
- unsigned int nr_needed = 0;
+ unsigned int nr_needed;
unsigned int nr_available;
bool rotated = false;
@@ -804,6 +812,8 @@ again:
if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
goto out;
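+	/*
+	 * Reset the scan state on every pass through "again:" so that
+	 * counts from a previous pass are not carried over.
+	 */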
+ nr_needed = 0;
+ start = NULL;
list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
if (!__rmid_valid(group->hw.cqm_rmid)) {
if (!start)