From e2bdbe80a0b7dea9ba73582701b8a67c01e1da4f Mon Sep 17 00:00:00 2001
From: Jin Yao
Date: Mon, 21 May 2018 22:57:44 +0800
Subject: perf evlist: Introduce force_leader() method

For non-explicit groups (e.g. those created with -e '{eventA,eventB}'),
'perf report' supports an option '--group' which can enable group output.
We also need to support 'perf annotate' with the same '--group'.

Create a new function perf_evlist__force_leader() which contains the
common code to force setting the group leader.

Signed-off-by: Jin Yao
Tested-by: Arnaldo Carvalho de Melo
Cc: Alexander Shishkin
Cc: Andi Kleen
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1526914666-31839-2-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/util/evlist.c | 15 +++++++++++++++
 tools/perf/util/evlist.h | 3 +++
 2 files changed, 18 insertions(+)

diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index a59281d64368..e7a4b31a84fb 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1795,3 +1795,18 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
 
 	return true;
 }
+
+/*
+ * Events in data file are not collected in groups, but we still want
+ * the group display. Set the artificial group and set the leader's
+ * forced_leader flag to notify the display code.
+ */
+void perf_evlist__force_leader(struct perf_evlist *evlist)
+{
+	if (!evlist->nr_groups) {
+		struct perf_evsel *leader = perf_evlist__first(evlist);
+
+		perf_evlist__set_leader(evlist);
+		leader->forced_leader = true;
+	}
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 6c41b2f78713..dc66436add98 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -309,4 +309,7 @@ struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 					    union perf_event *event);
 
 bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
+
+void perf_evlist__force_leader(struct perf_evlist *evlist);
+
 #endif /* __PERF_EVLIST_H */
-- cgit v1.2.3

From a26bb0ba706aef4f42cc9377c0d4e849378574a4 Mon Sep 17 00:00:00 2001
From: Jin Yao
Date: Mon, 21 May 2018 22:57:45 +0800
Subject: perf report: Use perf_evlist__force_leader to support '--group'

Since we created a new function perf_evlist__force_leader(), remove the
old code and use that new evlist method.

Signed-off-by: Jin Yao
Tested-by: Arnaldo Carvalho de Melo
Cc: Alexander Shishkin
Cc: Andi Kleen
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1526914666-31839-3-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/builtin-report.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4c931afb2e80..ad978e3ee2b8 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -194,20 +194,11 @@ out:
 	return err;
 }
 
-/*
- * Events in data file are not collect in groups, but we still want
- * the group display. Set the artificial group and set the leader's
- * forced_leader flag to notify the display code.
- */
 static void setup_forced_leader(struct report *report,
 				struct perf_evlist *evlist)
 {
-	if (report->group_set && !evlist->nr_groups) {
-		struct perf_evsel *leader = perf_evlist__first(evlist);
-
-		perf_evlist__set_leader(evlist);
-		leader->forced_leader = true;
-	}
+	if (report->group_set)
+		perf_evlist__force_leader(evlist);
 }
 
 static int process_feature_event(struct perf_tool *tool,
-- cgit v1.2.3

From 7ebaf4890f63eb90856b76864a0847413cdf6c86 Mon Sep 17 00:00:00 2001
From: Jin Yao
Date: Mon, 21 May 2018 22:57:46 +0800
Subject: perf annotate: Support '--group' option

With the '--group' option, 'perf annotate' will enable group output even
for a non-explicit group. For example:

  $ perf record -e cycles,branches ./div
  $ perf annotate main --stdio --group

            :      Disassembly of section .text:
            :
            :      00000000004004b0
            :
            :      main():
            :
            :              return i;
            :      }
            :
            :      int main(void)
            :      {
 0.00  0.00 :   4004b0:       push   %rbx
            :              int i;
            :              int flag;
            :              volatile double x = 1212121212, y = 121212;
            :
            :              s_randseed = time(0);
 0.00  0.00 :   4004b1:       xor    %edi,%edi
            :              srand(s_randseed);
 0.00  0.00 :   4004b3:       mov    $0x77359400,%ebx
            :
            :              return i;
            :      }
            :

But without '--group', only one event is reported:

  $ perf annotate main --stdio

            :      Disassembly of section .text:
            :
            :      00000000004004b0
: : main(): : : return i; : } : : int main(void) : { 0.00 : 4004b0: push %rbx : int i; : int flag; : volatile double x = 1212121212, y = 121212; : : s_randseed = time(0); 0.00 : 4004b1: xor %edi,%edi : srand(s_randseed); 0.00 : 4004b3: mov $0x77359400,%ebx : : return i; : } Signed-off-by: Jin Yao Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Kan Liang Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1526914666-31839-4-git-send-email-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 6e5d9f718154..da5704240239 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -45,6 +45,7 @@ struct perf_annotate { bool print_line; bool skip_missing; bool has_br_stack; + bool group_set; const char *sym_hist_filter; const char *cpu_list; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); @@ -508,6 +509,9 @@ int cmd_annotate(int argc, const char **argv) "Don't shorten the displayed pathnames"), OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, "Skip symbols that cannot be annotated"), + OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, + &annotate.group_set, + "Show event group information together"), OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), OPT_CALLBACK(0, "symfs", NULL, "directory", "Look for files with symbols relative to this directory", @@ -570,6 +574,9 @@ int cmd_annotate(int argc, const char **argv) annotate.has_br_stack = perf_header__has_feat(&annotate.session->header, HEADER_BRANCH_STACK); + if (annotate.group_set) + perf_evlist__force_leader(annotate.session->evlist); + ret = symbol__annotation_init(); if (ret < 0) goto out_delete; -- cgit v1.2.3 From 9cecca325ea879c84fcd31a5e609a514c1a1dbd1 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:32 +0300 Subject: perf machine: Add nr_cpus_avail() Add a function to return the number of the machine's available CPUs. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-5-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/env.c | 13 +++++++++++++ tools/perf/util/env.h | 1 + tools/perf/util/machine.c | 5 +++++ tools/perf/util/machine.h | 1 + 4 files changed, 20 insertions(+) diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 319fb0a0d05e..59f38c7693f8 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -106,11 +106,24 @@ static int perf_env__read_arch(struct perf_env *env) return env->arch ? 0 : -ENOMEM; } +static int perf_env__read_nr_cpus_avail(struct perf_env *env) +{ + if (env->nr_cpus_avail == 0) + env->nr_cpus_avail = cpu__max_present_cpu(); + + return env->nr_cpus_avail ? 0 : -ENOENT; +} + const char *perf_env__raw_arch(struct perf_env *env) { return env && !perf_env__read_arch(env) ? env->arch : "unknown"; } +int perf_env__nr_cpus_avail(struct perf_env *env) +{ + return env && !perf_env__read_nr_cpus_avail(env) ? 
env->nr_cpus_avail : 0; +} + void cpu_cache_level__free(struct cpu_cache_level *cache) { free(cache->type); diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 62e193948608..1f3ccc368530 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -77,5 +77,6 @@ void cpu_cache_level__free(struct cpu_cache_level *cache); const char *perf_env__arch(struct perf_env *env); const char *perf_env__raw_arch(struct perf_env *env); +int perf_env__nr_cpus_avail(struct perf_env *env); #endif /* __PERF_ENV_H */ diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index e011a7160380..f62ecd9c36e8 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2305,6 +2305,11 @@ bool machine__is(struct machine *machine, const char *arch) return machine && !strcmp(perf_env__raw_arch(machine->env), arch); } +int machine__nr_cpus_avail(struct machine *machine) +{ + return machine ? perf_env__nr_cpus_avail(machine->env) : 0; +} + int machine__get_kernel_start(struct machine *machine) { struct map *map = machine__kernel_map(machine); diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index b31d33b5aa2a..2d2b092ba753 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -189,6 +189,7 @@ static inline bool machine__is_host(struct machine *machine) } bool machine__is(struct machine *machine, const char *arch); +int machine__nr_cpus_avail(struct machine *machine); struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); -- cgit v1.2.3 From 4d99e4136580d178e3523281a820be17bf814bf8 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:33 +0300 Subject: perf machine: Workaround missing maps for x86 PTI entry trampolines On x86_64 the PTI entry trampolines are not in the kernel map created by perf tools. That results in the addresses having no symbols and prevents annotation. It also causes Intel PT to have decoding errors at the trampoline addresses. Workaround that by creating maps for the trampolines. At present the kernel does not export information revealing where the trampolines are. Until that happens, the addresses are hardcoded. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-6-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/machine.h | 3 ++ tools/perf/util/symbol.c | 12 +++--- 3 files changed, 106 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index f62ecd9c36e8..db695603873b 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -851,6 +851,102 @@ static int machine__get_running_kernel_start(struct machine *machine, return 0; } +/* Kernel-space maps for symbols that are outside the main kernel map and module maps */ +struct extra_kernel_map { + u64 start; + u64 end; + u64 pgoff; +}; + +static int machine__create_extra_kernel_map(struct machine *machine, + struct dso *kernel, + struct extra_kernel_map *xm) +{ + struct kmap *kmap; + struct map *map; + + map = map__new2(xm->start, kernel); + if (!map) + return -1; + + map->end = xm->end; + map->pgoff = xm->pgoff; + + kmap = map__kmap(map); + + kmap->kmaps = &machine->kmaps; + + map_groups__insert(&machine->kmaps, map); + + pr_debug2("Added extra kernel map %" PRIx64 "-%" PRIx64 "\n", + map->start, map->end); + + map__put(map); + + return 0; +} + +static u64 find_entry_trampoline(struct dso *dso) +{ + /* Duplicates are removed so lookup all aliases */ + const char *syms[] = { + "_entry_trampoline", + "__entry_trampoline_start", + "entry_SYSCALL_64_trampoline", + }; + struct symbol *sym = dso__first_symbol(dso); + unsigned int i; + + for (; sym; sym = dso__next_symbol(sym)) { + if (sym->binding != STB_GLOBAL) + continue; + for (i = 0; i < ARRAY_SIZE(syms); i++) { + if (!strcmp(sym->name, syms[i])) + return sym->start; + } + } + + return 0; +} + +/* + * These values can be used for kernels that do not have symbols for the entry + * trampolines in kallsyms. 
+ */ +#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL +#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000 +#define X86_64_ENTRY_TRAMPOLINE 0x6000 + +/* Map x86_64 PTI entry trampolines */ +int machine__map_x86_64_entry_trampolines(struct machine *machine, + struct dso *kernel) +{ + u64 pgoff = find_entry_trampoline(kernel); + int nr_cpus_avail, cpu; + + if (!pgoff) + return 0; + + nr_cpus_avail = machine__nr_cpus_avail(machine); + + /* Add a 1 page map for each CPU's entry trampoline */ + for (cpu = 0; cpu < nr_cpus_avail; cpu++) { + u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU + + cpu * X86_64_CPU_ENTRY_AREA_SIZE + + X86_64_ENTRY_TRAMPOLINE; + struct extra_kernel_map xm = { + .start = va, + .end = va + page_size, + .pgoff = pgoff, + }; + + if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) + return -1; + } + + return 0; +} + static int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) { diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 2d2b092ba753..b6a1c3eb3d65 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -268,4 +268,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, */ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp); +int machine__map_x86_64_entry_trampolines(struct machine *machine, + struct dso *kernel); + #endif /* __PERF_MACHINE_H */ diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4a39f4d0a174..701144094183 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1490,20 +1490,22 @@ int dso__load(struct dso *dso, struct map *map) goto out; } + if (map->groups && map->groups->machine) + machine = map->groups->machine; + else + machine = NULL; + if (dso->kernel) { if (dso->kernel == DSO_TYPE_KERNEL) ret = dso__load_kernel_sym(dso, map); else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) ret = dso__load_guest_kernel_sym(dso, map); + if (machine__is(machine, "x86_64")) + machine__map_x86_64_entry_trampolines(machine, dso); goto out; } - if (map->groups && map->groups->machine) - machine = map->groups->machine; - else - machine = NULL; - dso->adjust_symbols = 0; if (perfmap) { -- cgit v1.2.3 From 4d004365e25251002935fc3843d80934248ad3ed Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:34 +0300 Subject: perf machine: Fix map_groups__split_kallsyms() for entry trampoline symbols When kernel symbols are derived from /proc/kallsyms only (not using vmlinux or /proc/kcore) map_groups__split_kallsyms() is used. However that function makes assumptions that are not true with entry trampoline symbols. For now, remove the entry trampoline symbols at that point, as they are no longer needed at that point. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-7-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.h | 8 ++++++++ tools/perf/util/symbol.c | 13 +++++++++++++ 2 files changed, 21 insertions(+) diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index f1afe1ab6ff7..fafcc375ed37 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include "rwsem.h" @@ -239,4 +240,11 @@ static inline bool __map__is_kmodule(const struct map *map) bool map__has_symbols(const struct map *map); +#define ENTRY_TRAMPOLINE_NAME "__entry_SYSCALL_64_trampoline" + +static inline bool is_entry_trampoline(const char *name) +{ + return !strcmp(name, ENTRY_TRAMPOLINE_NAME); +} + #endif /* __PERF_MAP_H */ diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 701144094183..929058da6727 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -737,12 +737,15 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, struct rb_root *root = &dso->symbols; struct rb_node *next = rb_first(root); int kernel_range = 0; + bool x86_64; if (!kmaps) return -1; machine = kmaps->machine; + x86_64 = machine__is(machine, "x86_64"); + while (next) { char *module; @@ -790,6 +793,16 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, */ pos->start = curr_map->map_ip(curr_map, pos->start); pos->end = curr_map->map_ip(curr_map, pos->end); + } else if (x86_64 && is_entry_trampoline(pos->name)) { + /* + * These symbols are not needed anymore since the + * trampoline maps refer to the text section and it's + * symbols instead. Avoid having to deal with + * relocations, and the assumption that the first symbol + * is the start of kernel text, by simply removing the + * symbols at this point. + */ + goto discard_symbol; } else if (curr_map != initial_map) { char dso_name[PATH_MAX]; struct dso *ndso; -- cgit v1.2.3 From 5759a6820aadd38b2c8c10e93919eae8e31a9f9a Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:35 +0300 Subject: perf machine: Allow for extra kernel maps Identify extra kernel maps by name so that they can be distinguished from the kernel map and module maps. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-8-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/event.c | 2 +- tools/perf/util/machine.c | 8 ++++++-- tools/perf/util/map.c | 22 ++++++++++++++++++---- tools/perf/util/map.h | 7 ++++++- tools/perf/util/symbol.c | 7 +++---- 5 files changed, 34 insertions(+), 12 deletions(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 244135b5ea43..aafa9878465f 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -487,7 +487,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, for (pos = maps__first(maps); pos; pos = map__next(pos)) { size_t size; - if (__map__is_kernel(pos)) + if (!__map__is_kmodule(pos)) continue; size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index db695603873b..355d23bcd443 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -856,6 +856,7 @@ struct extra_kernel_map { u64 start; u64 end; u64 pgoff; + char name[KMAP_NAME_LEN]; }; static int machine__create_extra_kernel_map(struct machine *machine, @@ -875,11 +876,12 @@ static int machine__create_extra_kernel_map(struct machine *machine, kmap = map__kmap(map); kmap->kmaps = &machine->kmaps; + strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); map_groups__insert(&machine->kmaps, map); - pr_debug2("Added extra kernel map %" PRIx64 "-%" PRIx64 "\n", - map->start, map->end); + pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n", + kmap->name, map->start, map->end); map__put(map); @@ -940,6 +942,8 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine, .pgoff = pgoff, }; + strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN); + if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) return -1; } diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index c8fe836e4c3c..6ae97eda370b 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -252,6 +252,13 @@ bool __map__is_kernel(const struct map *map) return machine__kernel_map(map->groups->machine) == map; } +bool __map__is_extra_kernel_map(const struct map *map) +{ + struct kmap *kmap = __map__kmap((struct map *)map); + + return kmap && kmap->name[0]; +} + bool map__has_symbols(const struct map *map) { return dso__has_symbols(map->dso); @@ -846,15 +853,22 @@ struct map *map__next(struct map *map) return NULL; } -struct kmap *map__kmap(struct map *map) +struct kmap *__map__kmap(struct map *map) { - if (!map->dso || !map->dso->kernel) { - pr_err("Internal error: map__kmap with a non-kernel map\n"); + if (!map->dso || !map->dso->kernel) return NULL; - } return (struct kmap *)(map + 1); } +struct kmap *map__kmap(struct map *map) +{ + struct kmap *kmap = __map__kmap(map); + + if (!kmap) + pr_err("Internal error: map__kmap with a non-kernel map\n"); + return kmap; +} + struct map_groups *map__kmaps(struct map *map) { struct kmap *kmap = map__kmap(map); diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index fafcc375ed37..97e2a063bd65 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -47,9 +47,12 @@ struct map { refcount_t refcnt; }; +#define KMAP_NAME_LEN 256 + struct kmap { struct ref_reloc_sym *ref_reloc_sym; struct map_groups *kmaps; + char name[KMAP_NAME_LEN]; }; struct maps { @@ -76,6 +79,7 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg) void 
map_groups__put(struct map_groups *mg); +struct kmap *__map__kmap(struct map *map); struct kmap *map__kmap(struct map *map); struct map_groups *map__kmaps(struct map *map); @@ -232,10 +236,11 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, struct map *map_groups__find_by_name(struct map_groups *mg, const char *name); bool __map__is_kernel(const struct map *map); +bool __map__is_extra_kernel_map(const struct map *map); static inline bool __map__is_kmodule(const struct map *map) { - return !__map__is_kernel(map); + return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map); } bool map__has_symbols(const struct map *map); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 929058da6727..cdddae67f40c 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1030,7 +1030,7 @@ struct map *map_groups__first(struct map_groups *mg) return maps__first(&mg->maps); } -static int do_validate_kcore_modules(const char *filename, struct map *map, +static int do_validate_kcore_modules(const char *filename, struct map_groups *kmaps) { struct rb_root modules = RB_ROOT; @@ -1046,8 +1046,7 @@ static int do_validate_kcore_modules(const char *filename, struct map *map, struct map *next = map_groups__next(old_map); struct module_info *mi; - if (old_map == map || old_map->start == map->start) { - /* The kernel map */ + if (!__map__is_kmodule(old_map)) { old_map = next; continue; } @@ -1104,7 +1103,7 @@ static int validate_kcore_modules(const char *kallsyms_filename, kallsyms_filename)) return -EINVAL; - if (do_validate_kcore_modules(modules_filename, map, kmaps)) + if (do_validate_kcore_modules(modules_filename, kmaps)) return -EINVAL; return 0; -- cgit v1.2.3 From 1c5aae7710bb9ecf82a5cc88e35a028a8b385763 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:36 +0300 Subject: perf machine: Create maps for x86 PTI entry trampolines Create maps for x86 PTI entry trampolines, based on symbols found in kallsyms. It is also necessary to keep track of whether the trampolines have been mapped particularly when the kernel dso is kcore. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-9-git-send-email-adrian.hunter@intel.com [ Fix extra_kernel_map_info.cnt designed struct initializer on gcc 4.4.7 (centos:6, etc) ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/util/Build | 1 + tools/perf/arch/x86/util/machine.c | 103 +++++++++++++++++++++++++++++++++++++ tools/perf/util/machine.c | 66 +++++++++++++++++------- tools/perf/util/machine.h | 19 +++++++ tools/perf/util/symbol.c | 17 ++++++ 5 files changed, 187 insertions(+), 19 deletions(-) create mode 100644 tools/perf/arch/x86/util/machine.c diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build index f95e6f46ef0d..aa1ce5f6cc00 100644 --- a/tools/perf/arch/x86/util/Build +++ b/tools/perf/arch/x86/util/Build @@ -4,6 +4,7 @@ libperf-y += pmu.o libperf-y += kvm-stat.o libperf-y += perf_regs.o libperf-y += group.o +libperf-y += machine.o libperf-$(CONFIG_DWARF) += dwarf-regs.o libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o diff --git a/tools/perf/arch/x86/util/machine.c b/tools/perf/arch/x86/util/machine.c new file mode 100644 index 000000000000..4520ac53caa9 --- /dev/null +++ b/tools/perf/arch/x86/util/machine.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include "../../util/machine.h" +#include "../../util/map.h" +#include "../../util/symbol.h" +#include "../../util/sane_ctype.h" + +#include + +#if defined(__x86_64__) + +struct extra_kernel_map_info { + int cnt; + int max_cnt; + struct extra_kernel_map *maps; + bool get_entry_trampolines; + u64 entry_trampoline; +}; + +static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start, + u64 end, u64 pgoff, const char *name) +{ + if (mi->cnt >= mi->max_cnt) { + void *buf; + size_t sz; + + mi->max_cnt = mi->max_cnt ? 
mi->max_cnt * 2 : 32; + sz = sizeof(struct extra_kernel_map) * mi->max_cnt; + buf = realloc(mi->maps, sz); + if (!buf) + return -1; + mi->maps = buf; + } + + mi->maps[mi->cnt].start = start; + mi->maps[mi->cnt].end = end; + mi->maps[mi->cnt].pgoff = pgoff; + strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN); + + mi->cnt += 1; + + return 0; +} + +static int find_extra_kernel_maps(void *arg, const char *name, char type, + u64 start) +{ + struct extra_kernel_map_info *mi = arg; + + if (!mi->entry_trampoline && kallsyms2elf_binding(type) == STB_GLOBAL && + !strcmp(name, "_entry_trampoline")) { + mi->entry_trampoline = start; + return 0; + } + + if (is_entry_trampoline(name)) { + u64 end = start + page_size; + + return add_extra_kernel_map(mi, start, end, 0, name); + } + + return 0; +} + +int machine__create_extra_kernel_maps(struct machine *machine, + struct dso *kernel) +{ + struct extra_kernel_map_info mi = { .cnt = 0, }; + char filename[PATH_MAX]; + int ret; + int i; + + machine__get_kallsyms_filename(machine, filename, PATH_MAX); + + if (symbol__restricted_filename(filename, "/proc/kallsyms")) + return 0; + + ret = kallsyms__parse(filename, &mi, find_extra_kernel_maps); + if (ret) + goto out_free; + + if (!mi.entry_trampoline) + goto out_free; + + for (i = 0; i < mi.cnt; i++) { + struct extra_kernel_map *xm = &mi.maps[i]; + + xm->pgoff = mi.entry_trampoline; + ret = machine__create_extra_kernel_map(machine, kernel, xm); + if (ret) + goto out_free; + } + + machine->trampolines_mapped = mi.cnt; +out_free: + free(mi.maps); + return ret; +} + +#endif diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 355d23bcd443..dd7ab0731167 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -807,8 +807,8 @@ struct process_args { u64 start; }; -static void machine__get_kallsyms_filename(struct machine *machine, char *buf, - size_t bufsz) +void machine__get_kallsyms_filename(struct machine *machine, char *buf, + size_t bufsz) { if (machine__is_default_guest(machine)) scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); @@ -851,17 +851,9 @@ static int machine__get_running_kernel_start(struct machine *machine, return 0; } -/* Kernel-space maps for symbols that are outside the main kernel map and module maps */ -struct extra_kernel_map { - u64 start; - u64 end; - u64 pgoff; - char name[KMAP_NAME_LEN]; -}; - -static int machine__create_extra_kernel_map(struct machine *machine, - struct dso *kernel, - struct extra_kernel_map *xm) +int machine__create_extra_kernel_map(struct machine *machine, + struct dso *kernel, + struct extra_kernel_map *xm) { struct kmap *kmap; struct map *map; @@ -923,9 +915,33 @@ static u64 find_entry_trampoline(struct dso *dso) int machine__map_x86_64_entry_trampolines(struct machine *machine, struct dso *kernel) { - u64 pgoff = find_entry_trampoline(kernel); + struct map_groups *kmaps = &machine->kmaps; + struct maps *maps = &kmaps->maps; int nr_cpus_avail, cpu; + bool found = false; + struct map *map; + u64 pgoff; + + /* + * In the vmlinux case, pgoff is a virtual address which must now be + * mapped to a vmlinux offset. 
+ */ + for (map = maps__first(maps); map; map = map__next(map)) { + struct kmap *kmap = __map__kmap(map); + struct map *dest_map; + + if (!kmap || !is_entry_trampoline(kmap->name)) + continue; + + dest_map = map_groups__find(kmaps, map->pgoff); + if (dest_map != map) + map->pgoff = dest_map->map_ip(dest_map, map->pgoff); + found = true; + } + if (found || machine->trampolines_mapped) + return 0; + pgoff = find_entry_trampoline(kernel); if (!pgoff) return 0; @@ -948,6 +964,14 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine, return -1; } + machine->trampolines_mapped = nr_cpus_avail; + + return 0; +} + +int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused, + struct dso *kernel __maybe_unused) +{ return 0; } @@ -1306,9 +1330,8 @@ int machine__create_kernel_maps(struct machine *machine) return -1; ret = __machine__create_kernel_maps(machine, kernel); - dso__put(kernel); if (ret < 0) - return -1; + goto out_put; if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { if (machine__is_host(machine)) @@ -1323,7 +1346,8 @@ int machine__create_kernel_maps(struct machine *machine) if (name && map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) { machine__destroy_kernel_maps(machine); - return -1; + ret = -1; + goto out_put; } /* we have a real start address now, so re-order the kmaps */ @@ -1339,12 +1363,16 @@ int machine__create_kernel_maps(struct machine *machine) map__put(map); } + if (machine__create_extra_kernel_maps(machine, kernel)) + pr_debug("Problems creating extra kernel maps, continuing anyway...\n"); + /* update end address of the kernel map using adjacent module address */ map = map__next(machine__kernel_map(machine)); if (map) machine__set_kernel_mmap(machine, addr, map->start); - - return 0; +out_put: + dso__put(kernel); + return ret; } static bool machine__uses_kcore(struct machine *machine) diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index b6a1c3eb3d65..1de7660d93e9 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -56,6 +56,7 @@ struct machine { void *priv; u64 db_id; }; + bool trampolines_mapped; }; static inline struct threads *machine__threads(struct machine *machine, pid_t tid) @@ -268,6 +269,24 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, */ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp); +void machine__get_kallsyms_filename(struct machine *machine, char *buf, + size_t bufsz); + +int machine__create_extra_kernel_maps(struct machine *machine, + struct dso *kernel); + +/* Kernel-space maps for symbols that are outside the main kernel map and module maps */ +struct extra_kernel_map { + u64 start; + u64 end; + u64 pgoff; + char name[KMAP_NAME_LEN]; +}; + +int machine__create_extra_kernel_map(struct machine *machine, + struct dso *kernel, + struct extra_kernel_map *xm); + int machine__map_x86_64_entry_trampolines(struct machine *machine, struct dso *kernel); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index cdddae67f40c..8c84437f2a10 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1158,6 +1158,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, struct map_groups *kmaps = map__kmaps(map); struct kcore_mapfn_data md; struct map *old_map, *new_map, *replacement_map = NULL; + struct machine *machine; bool is_64_bit; int err, fd; char kcore_filename[PATH_MAX]; @@ -1166,6 +1167,8 @@ static int dso__load_kcore(struct dso *dso, 
struct map *map, if (!kmaps) return -EINVAL; + machine = kmaps->machine; + /* This function requires that the map is the kernel map */ if (!__map__is_kernel(map)) return -EINVAL; @@ -1209,6 +1212,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, map_groups__remove(kmaps, old_map); old_map = next; } + machine->trampolines_mapped = false; /* Find the kernel map using the '_stext' symbol */ if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) { @@ -1245,6 +1249,19 @@ static int dso__load_kcore(struct dso *dso, struct map *map, map__put(new_map); } + if (machine__is(machine, "x86_64")) { + u64 addr; + + /* + * If one of the corresponding symbols is there, assume the + * entry trampoline maps are too. + */ + if (!kallsyms__get_function_start(kallsyms_filename, + ENTRY_TRAMPOLINE_NAME, + &addr)) + machine->trampolines_mapped = true; + } + /* * Set the data type and long name so that kcore can be read via * dso__data_read_addr(). -- cgit v1.2.3 From a8ce99b0ee9ad32debad0a9f28d21451ba237cc1 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:37 +0300 Subject: perf machine: Synthesize and process mmap events for x86 PTI entry trampolines Like the kernel text, the location of x86 PTI entry trampolines must be recorded in the perf.data file. Like the kernel, synthesize a mmap event for that, and add processing for it. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-10-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/util/Build | 1 + tools/perf/arch/x86/util/event.c | 76 ++++++++++++++++++++++++++++++++++++++++ tools/perf/util/event.c | 34 ++++++++++++++---- tools/perf/util/event.h | 8 +++++ tools/perf/util/machine.c | 28 +++++++++++++++ 5 files changed, 140 insertions(+), 7 deletions(-) create mode 100644 tools/perf/arch/x86/util/event.c diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build index aa1ce5f6cc00..844b8f335532 100644 --- a/tools/perf/arch/x86/util/Build +++ b/tools/perf/arch/x86/util/Build @@ -5,6 +5,7 @@ libperf-y += kvm-stat.o libperf-y += perf_regs.o libperf-y += group.o libperf-y += machine.o +libperf-y += event.o libperf-$(CONFIG_DWARF) += dwarf-regs.o libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c new file mode 100644 index 000000000000..675a0213044d --- /dev/null +++ b/tools/perf/arch/x86/util/event.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include "../../util/machine.h" +#include "../../util/tool.h" +#include "../../util/map.h" +#include "../../util/util.h" +#include "../../util/debug.h" + +#if defined(__x86_64__) + +int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + int rc = 0; + struct map *pos; + struct map_groups *kmaps = &machine->kmaps; + struct maps *maps = &kmaps->maps; + union perf_event *event = zalloc(sizeof(event->mmap) + + machine->id_hdr_size); + + if (!event) { + pr_debug("Not enough memory synthesizing mmap event " + "for extra kernel maps\n"); + return -1; + } + + for (pos = maps__first(maps); pos; pos = map__next(pos)) { + struct kmap *kmap; + size_t size; + + if (!__map__is_extra_kernel_map(pos)) + continue; + + kmap = 
map__kmap(pos); + + size = sizeof(event->mmap) - sizeof(event->mmap.filename) + + PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + + machine->id_hdr_size; + + memset(event, 0, size); + + event->mmap.header.type = PERF_RECORD_MMAP; + + /* + * kernel uses 0 for user space maps, see kernel/perf_event.c + * __perf_event_mmap + */ + if (machine__is_host(machine)) + event->header.misc = PERF_RECORD_MISC_KERNEL; + else + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + + event->mmap.header.size = size; + + event->mmap.start = pos->start; + event->mmap.len = pos->end - pos->start; + event->mmap.pgoff = pos->pgoff; + event->mmap.pid = machine->pid; + + strlcpy(event->mmap.filename, kmap->name, PATH_MAX); + + if (perf_tool__process_synth_event(tool, event, machine, + process) != 0) { + rc = -1; + break; + } + } + + free(event); + return rc; +} + +#endif diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index aafa9878465f..0c8ecf0c78a4 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -88,10 +88,10 @@ static const char *perf_ns__name(unsigned int id) return perf_ns__names[id]; } -static int perf_tool__process_synth_event(struct perf_tool *tool, - union perf_event *event, - struct machine *machine, - perf_event__handler_t process) +int perf_tool__process_synth_event(struct perf_tool *tool, + union perf_event *event, + struct machine *machine, + perf_event__handler_t process) { struct perf_sample synth_sample = { .pid = -1, @@ -888,9 +888,16 @@ int kallsyms__get_function_start(const char *kallsyms_filename, return 0; } -int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine) +int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused, + perf_event__handler_t process __maybe_unused, + struct machine *machine __maybe_unused) +{ + return 0; +} + +static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) { size_t size; struct map *map = machine__kernel_map(machine); @@ -943,6 +950,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, return err; } +int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + int err; + + err = __perf_event__synthesize_kernel_mmap(tool, process, machine); + if (err < 0) + return err; + + return perf_event__synthesize_extra_kmaps(tool, process, machine); +} + int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 0f794744919c..bfa60bcafbde 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -750,6 +750,10 @@ int perf_event__process_exit(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine); +int perf_tool__process_synth_event(struct perf_tool *tool, + union perf_event *event, + struct machine *machine, + perf_event__handler_t process); int perf_event__process(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, @@ -796,6 +800,10 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, bool mmap_data, unsigned int proc_map_timeout); +int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine); + size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); size_t perf_event__fprintf_mmap(union 
perf_event *event, FILE *fp);
 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index dd7ab0731167..e7b4a8b513f2 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1387,6 +1387,32 @@ static bool machine__uses_kcore(struct machine *machine)
 	return false;
 }
 
+static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
+					     union perf_event *event)
+{
+	return machine__is(machine, "x86_64") &&
+	       is_entry_trampoline(event->mmap.filename);
+}
+
+static int machine__process_extra_kernel_map(struct machine *machine,
+					     union perf_event *event)
+{
+	struct map *kernel_map = machine__kernel_map(machine);
+	struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
+	struct extra_kernel_map xm = {
+		.start = event->mmap.start,
+		.end = event->mmap.start + event->mmap.len,
+		.pgoff = event->mmap.pgoff,
+	};
+
+	if (kernel == NULL)
+		return -1;
+
+	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
+
+	return machine__create_extra_kernel_map(machine, kernel, &xm);
+}
+
 static int machine__process_kernel_mmap_event(struct machine *machine,
 					      union perf_event *event)
 {
@@ -1490,6 +1516,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
 			 */
 			dso__load(kernel, machine__kernel_map(machine));
 		}
+	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
+		return machine__process_extra_kernel_map(machine, event);
 	}
 	return 0;
 out_problem:
-- cgit v1.2.3

From 787e4da9f95fd44376b3af6fa163ac0b3a48a1fc Mon Sep 17 00:00:00 2001
From: Jin Yao
Date: Tue, 22 May 2018 19:38:35 +0800
Subject: perf annotate: Show group event string for stdio

When the group is enabled, for tui/stdio2 the first line of the output
includes the group event string, while for stdio it shows only one
event. For example:

  perf record -e cycles,branches ./div
  perf annotate --group --stdio

  Percent | Source code & Disassembly of div for cycles (44407 samples)
  ......

The first line doesn't include the event 'branches'. With this patch,
it will show the correct group event string:

  perf annotate --group --stdio

  Percent | Source code & Disassembly of div for cycles, branches (44407 samples)
  ......

Signed-off-by: Jin Yao
Suggested-by: Arnaldo Carvalho de Melo
Tested-by: Arnaldo Carvalho de Melo
Cc: Alexander Shishkin
Cc: Andi Kleen
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1526989115-14435-1-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/util/annotate.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 6612c7f90af4..71897689dacf 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1965,6 +1965,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
 	u64 len;
 	int width = symbol_conf.show_total_period ? 12 : 8;
 	int graph_dotted_len;
+	char buf[512];
 
 	filename = strdup(dso->long_name);
 	if (!filename)
@@ -1977,8 +1978,11 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
 
 	len = symbol__size(sym);
 
-	if (perf_evsel__is_group_event(evsel))
+	if (perf_evsel__is_group_event(evsel)) {
 		width *= evsel->nr_members;
+		perf_evsel__group_desc(evsel, buf, sizeof(buf));
+		evsel_name = buf;
+	}
 
 	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
 		       width, width, symbol_conf.show_total_period ?
"Period" : -- cgit v1.2.3 From f6838209484d5cfb368ca5c61d150cc4054eef59 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:38 +0300 Subject: perf kcore_copy: Keep phdr data in a list Currently, kcore_copy makes 2 program headers, one for the kernel text (namely kernel_map) and one for the modules (namely modules_map). Now more program headers are needed, but treating each program header as a special case results in much more code. Instead, in preparation to add more program headers, change to keep program header data (phdr_data) in a list. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-11-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 48943b834f11..b13873a6f368 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1388,6 +1388,7 @@ struct phdr_data { off_t offset; u64 addr; u64 len; + struct list_head node; }; struct kcore_copy_info { @@ -1399,6 +1400,7 @@ struct kcore_copy_info { u64 last_module_symbol; struct phdr_data kernel_map; struct phdr_data modules_map; + struct list_head phdrs; }; static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, @@ -1510,6 +1512,11 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0) return -1; + if (kci->kernel_map.len) + list_add_tail(&kci->kernel_map.node, &kci->phdrs); + if (kci->modules_map.len) + list_add_tail(&kci->modules_map.node, &kci->phdrs); + return 0; } @@ -1678,6 +1685,8 @@ int kcore_copy(const char *from_dir, const char *to_dir) char kcore_filename[PATH_MAX]; char extract_filename[PATH_MAX]; + INIT_LIST_HEAD(&kci.phdrs); + if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms")) return -1; -- cgit v1.2.3 From 6e97957d3d30552c415292bb08a0e5f3c459c027 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:39 +0300 Subject: perf kcore_copy: Keep a count of phdrs In preparation to add more program headers, keep a count of phdrs. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-12-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index b13873a6f368..4e7b71e8ac0e 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1398,6 +1398,7 @@ struct kcore_copy_info { u64 last_symbol; u64 first_module; u64 last_module_symbol; + size_t phnum; struct phdr_data kernel_map; struct phdr_data modules_map; struct list_head phdrs; @@ -1517,6 +1518,8 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) if (kci->modules_map.len) list_add_tail(&kci->modules_map.node, &kci->phdrs); + kci->phnum = !!kci->kernel_map.len + !!kci->modules_map.len; + return 0; } @@ -1678,7 +1681,6 @@ int kcore_copy(const char *from_dir, const char *to_dir) { struct kcore kcore; struct kcore extract; - size_t count = 2; int idx = 0, err = -1; off_t offset = page_size, sz, modules_offset = 0; struct kcore_copy_info kci = { .stext = 0, }; @@ -1705,10 +1707,7 @@ int kcore_copy(const char *from_dir, const char *to_dir) if (kcore__init(&extract, extract_filename, kcore.elfclass, false)) goto out_kcore_close; - if (!kci.modules_map.addr) - count -= 1; - - if (kcore__copy_hdr(&kcore, &extract, count)) + if (kcore__copy_hdr(&kcore, &extract, kci.phnum)) goto out_extract_close; if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr, -- cgit v1.2.3 From c9dd1d894958b81a329ec01e7dd03b92eca52789 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:40 +0300 Subject: perf kcore_copy: Calculate offset from phnum In preparation to add more program headers, calculate offset from the number of phdrs. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-13-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 4e7b71e8ac0e..4aec12102e19 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1682,7 +1682,7 @@ int kcore_copy(const char *from_dir, const char *to_dir) struct kcore kcore; struct kcore extract; int idx = 0, err = -1; - off_t offset = page_size, sz, modules_offset = 0; + off_t offset, sz, modules_offset = 0; struct kcore_copy_info kci = { .stext = 0, }; char kcore_filename[PATH_MAX]; char extract_filename[PATH_MAX]; @@ -1710,6 +1710,10 @@ int kcore_copy(const char *from_dir, const char *to_dir) if (kcore__copy_hdr(&kcore, &extract, kci.phnum)) goto out_extract_close; + offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) + + gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT); + offset = round_up(offset, page_size); + if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr, kci.kernel_map.len)) goto out_extract_close; -- cgit v1.2.3 From 15acef6c3727cfe0bc9d1f6b273cca46689e8cd8 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:41 +0300 Subject: perf kcore_copy: Layout sections In preparation to add more program headers, layout the relative offset of each section. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-14-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 4aec12102e19..3e76a0efd15c 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1386,6 +1386,7 @@ static off_t kcore__write(struct kcore *kcore) struct phdr_data { off_t offset; + off_t rel; u64 addr; u64 len; struct list_head node; @@ -1404,6 +1405,9 @@ struct kcore_copy_info { struct list_head phdrs; }; +#define kcore_copy__for_each_phdr(k, p) \ + list_for_each_entry((p), &(k)->phdrs, node) + static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, u64 start) { @@ -1518,11 +1522,21 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) if (kci->modules_map.len) list_add_tail(&kci->modules_map.node, &kci->phdrs); - kci->phnum = !!kci->kernel_map.len + !!kci->modules_map.len; - return 0; } +static void kcore_copy__layout(struct kcore_copy_info *kci) +{ + struct phdr_data *p; + off_t rel = 0; + + kcore_copy__for_each_phdr(kci, p) { + p->rel = rel; + rel += p->len; + kci->phnum += 1; + } +} + static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, Elf *elf) { @@ -1558,7 +1572,12 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, if (kci->first_module && !kci->last_module_symbol) return -1; - return kcore_copy__read_maps(kci, elf); + if (kcore_copy__read_maps(kci, elf)) + return -1; + + kcore_copy__layout(kci); + + return 0; } static int kcore_copy__copy_file(const char *from_dir, const char *to_dir, -- cgit v1.2.3 From 
d2c959803c8843f64e419d833dc3722154c82492 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:42 +0300 Subject: perf kcore_copy: Iterate phdrs In preparation to add more program headers, iterate phdrs instead of assuming there is only one for the kernel text and one for the modules. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-15-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 3e76a0efd15c..91b8cfb045ec 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1701,10 +1701,11 @@ int kcore_copy(const char *from_dir, const char *to_dir) struct kcore kcore; struct kcore extract; int idx = 0, err = -1; - off_t offset, sz, modules_offset = 0; + off_t offset, sz; struct kcore_copy_info kci = { .stext = 0, }; char kcore_filename[PATH_MAX]; char extract_filename[PATH_MAX]; + struct phdr_data *p; INIT_LIST_HEAD(&kci.phdrs); @@ -1733,14 +1734,10 @@ int kcore_copy(const char *from_dir, const char *to_dir) gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT); offset = round_up(offset, page_size); - if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr, - kci.kernel_map.len)) - goto out_extract_close; + kcore_copy__for_each_phdr(&kci, p) { + off_t offs = p->rel + offset; - if (kci.modules_map.addr) { - modules_offset = offset + kci.kernel_map.len; - if (kcore__add_phdr(&extract, idx, modules_offset, - kci.modules_map.addr, kci.modules_map.len)) + if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len)) goto out_extract_close; } @@ -1748,14 +1745,12 @@ int kcore_copy(const char *from_dir, const char *to_dir) if (sz < 0 || sz > offset) goto out_extract_close; - if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset, - kci.kernel_map.len)) - goto out_extract_close; + kcore_copy__for_each_phdr(&kci, p) { + off_t offs = p->rel + offset; - if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset, - extract.fd, modules_offset, - kci.modules_map.len)) - goto out_extract_close; + if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len)) + goto out_extract_close; + } if (kcore_copy__compare_file(from_dir, to_dir, "modules")) goto out_extract_close; -- cgit v1.2.3 From b4503cdb67098b2f08320c2c83df758ea72a4431 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:43 +0300 Subject: perf kcore_copy: Get rid of kernel_map In preparation to add more program headers, get rid of kernel_map and modules_map by moving ->kernel_map and ->modules_map to newly allocated entries in the ->phdrs list. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-16-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 70 ++++++++++++++++++++++++++++++++------------ 1 file changed, 52 insertions(+), 18 deletions(-) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 91b8cfb045ec..37d9324c277c 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1400,14 +1400,47 @@ struct kcore_copy_info { u64 first_module; u64 last_module_symbol; size_t phnum; - struct phdr_data kernel_map; - struct phdr_data modules_map; struct list_head phdrs; }; #define kcore_copy__for_each_phdr(k, p) \ list_for_each_entry((p), &(k)->phdrs, node) +static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset) +{ + struct phdr_data *p = zalloc(sizeof(*p)); + + if (p) { + p->addr = addr; + p->len = len; + p->offset = offset; + } + + return p; +} + +static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci, + u64 addr, u64 len, + off_t offset) +{ + struct phdr_data *p = phdr_data__new(addr, len, offset); + + if (p) + list_add_tail(&p->node, &kci->phdrs); + + return p; +} + +static void kcore_copy__free_phdrs(struct kcore_copy_info *kci) +{ + struct phdr_data *p, *tmp; + + list_for_each_entry_safe(p, tmp, &kci->phdrs, node) { + list_del(&p->node); + free(p); + } +} + static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, u64 start) { @@ -1487,15 +1520,18 @@ static int kcore_copy__parse_modules(struct kcore_copy_info *kci, return 0; } -static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff, - u64 s, u64 e) +static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end, + u64 pgoff, u64 s, u64 e) { - if (p->addr || s < start || s >= end) - return; + u64 len, offset; + + if (s < start || s >= end) + return 0; - p->addr = s; - p->offset = (s - start) + pgoff; - p->len = e < end ? e - s : end - s; + offset = (s - start) + pgoff; + len = e < end ? e - s : end - s; + + return kcore_copy_info__addnew(kci, s, len, offset) ? 
0 : -1; } static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) @@ -1503,11 +1539,12 @@ static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) struct kcore_copy_info *kci = data; u64 end = start + len; - kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext, - kci->etext); + if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext)) + return -1; - kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module, - kci->last_module_symbol); + if (kcore_copy__map(kci, start, end, pgoff, kci->first_module, + kci->last_module_symbol)) + return -1; return 0; } @@ -1517,11 +1554,6 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0) return -1; - if (kci->kernel_map.len) - list_add_tail(&kci->kernel_map.node, &kci->phdrs); - if (kci->modules_map.len) - list_add_tail(&kci->modules_map.node, &kci->phdrs); - return 0; } @@ -1773,6 +1805,8 @@ out_unlink_kallsyms: if (err) kcore_copy__unlink(to_dir, "kallsyms"); + kcore_copy__free_phdrs(&kci); + return err; } -- cgit v1.2.3 From a1a3a0624e6cd0e2c46a7400800a5e687521a504 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:44 +0300 Subject: perf kcore_copy: Copy x86 PTI entry trampoline sections Identify and copy any sections for x86 PTI entry trampolines. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-17-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 37d9324c277c..584966913aeb 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1392,6 +1392,11 @@ struct phdr_data { struct list_head node; }; +struct sym_data { + u64 addr; + struct list_head node; +}; + struct kcore_copy_info { u64 stext; u64 etext; @@ -1401,6 +1406,7 @@ struct kcore_copy_info { u64 last_module_symbol; size_t phnum; struct list_head phdrs; + struct list_head syms; }; #define kcore_copy__for_each_phdr(k, p) \ @@ -1441,6 +1447,29 @@ static void kcore_copy__free_phdrs(struct kcore_copy_info *kci) } } +static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci, + u64 addr) +{ + struct sym_data *s = zalloc(sizeof(*s)); + + if (s) { + s->addr = addr; + list_add_tail(&s->node, &kci->syms); + } + + return s; +} + +static void kcore_copy__free_syms(struct kcore_copy_info *kci) +{ + struct sym_data *s, *tmp; + + list_for_each_entry_safe(s, tmp, &kci->syms, node) { + list_del(&s->node); + free(s); + } +} + static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, u64 start) { @@ -1471,6 +1500,9 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, return 0; } + if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start)) + return -1; + return 0; } @@ -1538,6 +1570,7 @@ static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) { struct kcore_copy_info *kci = data; u64 end = start + len; + struct sym_data *sdat; if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext)) return -1; @@ -1546,6 +1579,13 @@ static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) 
kci->last_module_symbol)) return -1; + list_for_each_entry(sdat, &kci->syms, node) { + u64 s = round_down(sdat->addr, page_size); + + if (kcore_copy__map(kci, start, end, pgoff, s, s + len)) + return -1; + } + return 0; } @@ -1740,6 +1780,7 @@ int kcore_copy(const char *from_dir, const char *to_dir) struct phdr_data *p; INIT_LIST_HEAD(&kci.phdrs); + INIT_LIST_HEAD(&kci.syms); if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms")) return -1; @@ -1806,6 +1847,7 @@ out_unlink_kallsyms: kcore_copy__unlink(to_dir, "kallsyms"); kcore_copy__free_phdrs(&kci); + kcore_copy__free_syms(&kci); return err; } -- cgit v1.2.3 From 22916fdb9c50e8fb303bdcedca88fd8798a85844 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 May 2018 13:54:45 +0300 Subject: perf kcore_copy: Amend the offset of sections that remap kernel text x86 PTI entry trampolines all map to the same physical page. If that is reflected in the program headers of /proc/kcore, then do the same for the copy of kcore. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Andy Lutomirski Cc: Dave Hansen Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Joerg Roedel Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1526986485-6562-18-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 53 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 584966913aeb..29770ea61768 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1390,6 +1390,7 @@ struct phdr_data { u64 addr; u64 len; struct list_head node; + struct phdr_data *remaps; }; struct sym_data { @@ -1597,16 +1598,62 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) return 0; } +static void kcore_copy__find_remaps(struct kcore_copy_info *kci) +{ + struct phdr_data *p, *k = NULL; + u64 kend; + + if (!kci->stext) + return; + + /* Find phdr that corresponds to the kernel map (contains stext) */ + kcore_copy__for_each_phdr(kci, p) { + u64 pend = p->addr + p->len - 1; + + if (p->addr <= kci->stext && pend >= kci->stext) { + k = p; + break; + } + } + + if (!k) + return; + + kend = k->offset + k->len; + + /* Find phdrs that remap the kernel */ + kcore_copy__for_each_phdr(kci, p) { + u64 pend = p->offset + p->len; + + if (p == k) + continue; + + if (p->offset >= k->offset && pend <= kend) + p->remaps = k; + } +} + static void kcore_copy__layout(struct kcore_copy_info *kci) { struct phdr_data *p; off_t rel = 0; + kcore_copy__find_remaps(kci); + kcore_copy__for_each_phdr(kci, p) { - p->rel = rel; - rel += p->len; + if (!p->remaps) { + p->rel = rel; + rel += p->len; + } kci->phnum += 1; } + + kcore_copy__for_each_phdr(kci, p) { + struct phdr_data *k = p->remaps; + + if (k) + p->rel = p->offset - k->offset + k->rel; + } } static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, @@ -1821,6 +1868,8 @@ int kcore_copy(const char *from_dir, const char *to_dir) kcore_copy__for_each_phdr(&kci, p) { off_t offs = p->rel + offset; + if (p->remaps) + continue; if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len)) goto out_extract_close; } -- cgit v1.2.3