@@ -509,7 +509,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
	if (!evlist)
		return 0;

-	maps = evlist->mmap;
+	maps = backward ? evlist->backward_mmap : evlist->mmap;
	if (!maps)
		return 0;
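The new third argument selects which ring-buffer array is drained. A minimal sketch of the intended caller pattern, assuming a record__mmap_read_all()-style wrapper that is not part of this excerpt:

/* Sketch only: drain the forward maps first, then the backward ones.
 * Only record__mmap_read_evlist() and its new bool argument come from
 * the hunk above; this wrapper's shape is an assumption. */
static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}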
@@ -696,8 +696,12 @@ perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused
static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
-	if (evlist && evlist->mmap && evlist->mmap[0].base)
-		return evlist->mmap[0].base;
+	if (evlist) {
+		if (evlist->mmap && evlist->mmap[0].base)
+			return evlist->mmap[0].base;
+		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
+			return evlist->backward_mmap[0].base;
+	}
	return NULL;
}
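With the fallback above, an mmap page can still be picked when only backward ring buffers were mapped. A hedged sketch of the consumer side; apart from perf_evlist__pick_pc() and perf_event__synth_time_conv(), which appear in these hunks, the call shape and argument names are assumptions:

/* Assumed call site (not in this excerpt): the picked page feeds
 * time_conv synthesis, so the backward_mmap fallback keeps TSC
 * conversion data available when no forward maps were created. */
int err = perf_event__synth_time_conv(perf_evlist__pick_pc(rec->evlist),
				      tool, process, machine);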
@@ -123,6 +123,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}
@@ -973,17 +974,20 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

-	if (evlist->mmap == NULL)
-		return;
+	if (evlist->mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i]);

-	for (i = 0; i < evlist->nr_mmaps; i++)
-		perf_mmap__munmap(&evlist->mmap[i]);
+	if (evlist->backward_mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
}
static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
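The teardown paths now treat evlist->mmap and evlist->backward_mmap symmetrically: both arrays are nr_mmaps entries long, both are munmap'ed in perf_evlist__munmap_nofree(), and both are zfree'd afterwards. A hypothetical helper (not part of the patch) that makes the symmetry explicit:

/* Hypothetical factoring, not in the change: both map arrays share the
 * same length, so each teardown loop above is an instance of this. */
static void perf_mmap_array__munmap(struct perf_mmap *maps, int nr)
{
	int i;

	if (!maps)
		return;
	for (i = 0; i < nr; i++)
		perf_mmap__munmap(&maps[i]);
}

perf_evlist__munmap_nofree() would then reduce to one such call per array.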
@@ -61,6 +61,7 @@ struct perf_evlist {
	} workload;
	struct fdarray pollfd;
	struct perf_mmap *mmap;
+	struct perf_mmap *backward_mmap;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *selected;