Merge tag 'perf-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull performance event updates from Ingo Molnar:

 - Add CPU-PMU support for Intel Sapphire Rapids CPUs

 - Extend the perf ABI with PERF_SAMPLE_WEIGHT_STRUCT, to offer
   two-parameter sampling event feedback. Not used yet, but intended
   for the Golden Cove CPU-PMU, which can provide both instruction
   latency and cache latency information for memory profiling events
   (see the sketch after this list).

 - Remove the experimental, default-disabled perfmon-v4
   counter_freezing support that could only be enabled via a boot
   option. The hardware is hopelessly broken; we'd like to make sure
   nobody starts relying on this, as it would only end in tears.

 - Fix energy/power events on Intel SPR platforms

 - Simplify the uprobes resume_execution() logic

 - Misc smaller fixes.
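
For reference, the two-parameter weight layout that PERF_SAMPLE_WEIGHT_STRUCT
encodes is a union overlaying the legacy 64-bit weight. A sketch of the uapi
union added by the "perf/core: Add PERF_SAMPLE_WEIGHT_STRUCT" commit in this
merge, reconstructed here for illustration; see include/uapi/linux/perf_event.h
for the authoritative definition:

	union perf_sample_weight {
		__u64	full;		/* legacy PERF_SAMPLE_WEIGHT view */
	#if defined(__LITTLE_ENDIAN_BITFIELD)
		struct {
			__u32	var1_dw;	/* first parameter, 32 bits */
			__u16	var2_w;		/* second parameter, 16 bits */
			__u16	var3_w;		/* third parameter, 16 bits */
		};
	#elif defined(__BIG_ENDIAN_BITFIELD)
		struct {
			__u16	var3_w;
			__u16	var2_w;
			__u32	var1_dw;
		};
	#endif
	};

On Golden Cove the sub-fields are meant to carry the instruction latency and
cache latency mentioned above. This is also why the powerpc hunk below changes
ppmu->get_mem_weight() to write into data.weight.full: data.weight becomes
this union, and .full is the old 64-bit view of it.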

* tag 'perf-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/rapl: Fix psys-energy event on Intel SPR platform
  perf/x86/rapl: Only check lower 32bits for RAPL energy counters
  perf/x86/rapl: Add msr mask support
  perf/x86/kvm: Add Cascade Lake Xeon steppings to isolation_ucodes[]
  perf/x86/intel: Support CPUID 10.ECX to disable fixed counters
  perf/x86/intel: Add perf core PMU support for Sapphire Rapids
  perf/x86/intel: Filter unsupported Topdown metrics event
  perf/x86/intel: Factor out intel_update_topdown_event()
  perf/core: Add PERF_SAMPLE_WEIGHT_STRUCT
  perf/intel: Remove Perfmon-v4 counter_freezing support
  x86/perf: Use static_call for x86_pmu.guest_get_msrs
  perf/x86/intel/uncore: With > 8 nodes, get pci bus die id from NUMA info
  perf/x86/intel/uncore: Store the logical die id instead of the physical die id.
  x86/kprobes: Do not decode opcode in resume_execution()
torvalds committed Feb 21, 2021
2 parents 657bd90 + 8bcfdd7 commit d310ec0
Showing 19 changed files with 801 additions and 433 deletions.
6 changes: 0 additions & 6 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -945,12 +945,6 @@
 			causing system reset or hang due to sending
 			INIT from AP to BSP.
 
-	perf_v4_pmi=	[X86,INTEL]
-			Format: <bool>
-			Disable Intel PMU counter freezing feature.
-			The feature only exists starting from
-			Arch Perfmon v4 (Skylake and newer).
-
 	disable_ddw	[PPC/PSERIES]
 			Disable Dynamic DMA Window support. Use this
 			to workaround buggy firmware.
2 changes: 1 addition & 1 deletion arch/powerpc/perf/core-book3s.c
@@ -2195,7 +2195,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,

 	if (event->attr.sample_type & PERF_SAMPLE_WEIGHT &&
 	    ppmu->get_mem_weight)
-		ppmu->get_mem_weight(&data.weight);
+		ppmu->get_mem_weight(&data.weight.full);
 
 	if (perf_event_overflow(event, &data, regs))
 		power_pmu_stop(event, 0);
28 changes: 27 additions & 1 deletion arch/x86/events/core.c
@@ -81,6 +81,8 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
 
+DEFINE_STATIC_CALL_NULL(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
+
 u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -253,6 +255,8 @@ static bool check_hw_exists(void)
 	if (ret)
 		goto msr_fail;
 	for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
+		if (fixed_counter_disabled(i))
+			continue;
 		if (val & (0x03 << i*4)) {
 			bios_fail = 1;
 			val_fail = val;
@@ -665,6 +669,12 @@ void x86_pmu_disable_all(void)
 	}
 }
 
+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+	return static_call(x86_pmu_guest_get_msrs)(nr);
+}
+EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
+
 /*
  * There may be PMI landing after enabled=0. The PMI hitting could be before or
  * after disable_all.
@@ -1523,6 +1533,8 @@ void perf_event_print_debug(void)
 			cpu, idx, prev_left);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+		if (fixed_counter_disabled(idx))
+			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1923,13 +1935,22 @@ static void x86_pmu_static_call_update(void)

 	static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
 	static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
+
+	static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);
 }
 
 static void _x86_pmu_read(struct perf_event *event)
 {
 	x86_perf_event_update(event);
 }
 
+static inline struct perf_guest_switch_msr *
+perf_guest_get_msrs_nop(int *nr)
+{
+	*nr = 0;
+	return NULL;
+}
+
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
@@ -1995,12 +2016,17 @@ static int __init init_hw_perf_events(void)
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... fixed-purpose events: %lu\n",
hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1)
<< INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl));
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);

if (!x86_pmu.read)
x86_pmu.read = _x86_pmu_read;

if (!x86_pmu.guest_get_msrs)
x86_pmu.guest_get_msrs = perf_guest_get_msrs_nop;

x86_pmu_static_call_update();

/*
[diffs for the remaining 16 of the 19 changed files are not shown]
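
Taken together, the arch/x86/events/core.c hunks above convert the
x86_pmu.guest_get_msrs hook from an indirect function-pointer call into a
static call, so the hot KVM path goes through a directly patched call
instead of an indirect branch. A condensed reading of the pattern,
assembled from the hunks above (kernel-context code, not a stand-alone
program):

	/* Declare the call site; its target is null until updated. */
	DEFINE_STATIC_CALL_NULL(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);

	/* At init, guarantee a callable target even on PMUs that do not
	 * implement the hook, then patch the call site once: */
	if (!x86_pmu.guest_get_msrs)
		x86_pmu.guest_get_msrs = perf_guest_get_msrs_nop; /* *nr = 0; return NULL; */
	static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);

	/* Callers (KVM) go through the exported wrapper: */
	struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
	{
		return static_call(x86_pmu_guest_get_msrs)(nr);
	}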

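The fixed_counter_disabled() checks added to check_hw_exists() and
perf_event_print_debug() belong to the "perf/x86/intel: Support CPUID
10.ECX to disable fixed counters" commit; the helper itself is outside
this excerpt. A hypothetical sketch of what it tests, consistent with the
new fixed-purpose-events pr_info() above (which counts only the fixed
counters whose enable bits survive in intel_ctrl):

	/* Hypothetical reconstruction, not the patch's exact body: a fixed
	 * counter is usable only if its bit is set in the intel_ctrl mask
	 * built from CPUID 10.ECX. */
	static inline bool fixed_counter_disabled(int i)
	{
		return !(x86_pmu.intel_ctrl & (1ULL << (i + INTEL_PMC_IDX_FIXED)));
	}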