Merge tag 'perf-core-for-mingo-5.5-20191107' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

perf report:

  Jin Yao:

  - Introduce --total-cycles for basic block profiling, making further use of
    the data obtained from the LBR; an example should suffice:

      # perf record -b
      ^C[ perf record: Woken up 595 times to write data ]
      [ perf record: Captured and wrote 156.672 MB perf.data (196873 samples) ]

      # perf evlist -v
      cycles: size: 112, { sample_period, sample_freq }: 4000, sample_type: IP|TID|TIME|CPU|PERIOD|BRANCH_STACK, read_format: ID, disabled: 1, inherit: 1, mmap: 1, comm: 1, freq: 1, task: 1, precise_ip: 3, sample_id_all: 1, exclude_guest: 1, mmap2: 1, comm_exec: 1, ksymbol: 1, bpf_event: 1, branch_sample_type: ANY

      # perf report --total-cycles --stdio
      # To display the perf.data header info, please use --header/--header-only options.
      #
      # Total Lost Samples: 0
      #
      # Samples: 6M of event 'cycles'
      # Event count (approx.): 6299936
      #
      # Sampled  Sampled   Avg     Avg
      # Cycles%  Cycles  Cycles%  Cycles                 [Program Block Range]     Shared Object
      # .......  ......  .......  .....   ....................................  ................
      #
         2.17%     1.7M   0.08%     607       [compiler.h:199 -> common.c:221]  [kernel.vmlinux]
         0.72%   544.5K   0.03%     230     [entry_64.S:657 -> entry_64.S:662]  [kernel.vmlinux]
         0.56%   541.8K   0.09%     672       [compiler.h:199 -> common.c:300]  [kernel.vmlinux]
         0.39%   293.2K   0.01%     104   [list_debug.c:43 -> list_debug.c:61]  [kernel.vmlinux]
         0.36%   278.6K   0.03%     272   [entry_64.S:1289 -> entry_64.S:1308]  [kernel.vmlinux]
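
    The columns, condensed into a sketch (a self-contained toy derived from the
    perf-report.txt hunk further down in this commit; the field names, the data
    and the use of the block sum as "total sampled cycles" are simplifications,
    not perf's actual block_info code):

      /* Sampled Cycles% = block sampled cycles aggregation / total cycles
       * Sampled Cycles  = block sampled cycles aggregation
       * Avg Cycles      = block cycles aggregation / number of samples
       * Avg Cycles%     = block average / sum of all block averages   */
      #include <stdio.h>

      struct block { unsigned long long cycles_aggr, num_aggr; };

      int main(void)
      {
              struct block b[] = { { 1700000, 2800 }, { 544500, 2367 } };
              unsigned long long total = 0;
              double avg_sum = 0.0;
              int i;

              for (i = 0; i < 2; i++) {
                      total += b[i].cycles_aggr;
                      avg_sum += (double)b[i].cycles_aggr / b[i].num_aggr;
              }

              for (i = 0; i < 2; i++) {
                      double avg = (double)b[i].cycles_aggr / b[i].num_aggr;

                      printf("%7.2f%% %10llu %7.2f%% %7.0f\n",
                             100.0 * b[i].cycles_aggr / total,
                             b[i].cycles_aggr,
                             100.0 * avg / avg_sum, avg);
              }
              return 0;
      }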

perf record:

  Adrian Hunter:

  - Allow storing perf.data in a directory together with a copy of /proc/kcore.

  Jiwei Sun:

  - Add support for limiting the perf output file size, e.g.:

    # perf record --all-cpus -F 10000 --max-size=4M sleep 10h
    [ perf record: perf size limit reached (4097 KB), stopping session ]
    [ perf record: Woken up 6 times to write data ]
    [ perf record: Captured and wrote 4.048 MB perf.data (54094 samples) ]
    Terminated
    # ls -lah perf.data
    -rw-------. 1 root root 4.1M Nov  7 15:27 perf.data
    #
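
    A sketch of the mechanism (self-contained and illustrative; the check
    mirrors the record__output_max_size_exceeded() hunk in builtin-record.c
    further down, but the variable names here are stand-ins):

      #include <stdio.h>

      static unsigned long long bytes_written;
      static unsigned long long output_max_size = 4 << 20; /* --max-size=4M */
      static volatile int done;

      static void record_write(unsigned long long size)
      {
              bytes_written += size;
              if (output_max_size && bytes_written >= output_max_size && !done) {
                      fprintf(stderr, "[ perf record: perf size limit reached "
                                      "(%llu KB), stopping session ]\n",
                              bytes_written >> 10);
                      done = 1; /* the main loop exits on its next pass */
              }
      }

      int main(void)
      {
              while (!done)
                      record_write(512 << 10); /* pretend 512 KB mmap flushes */
              return 0;
      }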

perf stat:

  Jiri Olsa:

  - Add --per-node aggregation support:

    In live mode:

      # perf stat  -a -I 1000 -e cycles --per-node
      #           time node   cpus             counts unit events
           1.000542550 N0       20          6,202,097      cycles
           1.000542550 N1       20            639,559      cycles
           2.002040063 N0       20          7,412,495      cycles
           2.002040063 N1       20          2,185,577      cycles
           3.003451699 N0       20          6,508,917      cycles
           3.003451699 N1       20            765,607      cycles
      ...

    Or in the record/report stat session:

      # perf stat record -a -I 1000 -e cycles
      #           time             counts unit events
           1.000536937         10,008,468      cycles
           2.002090152          9,578,539      cycles
           3.003625233          7,647,869      cycles
           4.005135036          7,032,086      cycles
      ^C     4.340902364          3,923,893      cycles

      # perf stat report --per-node
      #           time node   cpus             counts unit events
           1.000536937 N0       20          9,355,086      cycles
           1.000536937 N1       20            653,382      cycles
           2.002090152 N0       20          7,712,838      cycles
           2.002090152 N1       20          1,865,701      cycles
       ...
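
    The aggregation itself is simple once each CPU is mapped to its NUMA node;
    a self-contained sketch (the 20-CPUs-per-node topology is made up to match
    the output above):

      #include <stdio.h>

      #define NR_CPUS  40
      #define NR_NODES  2

      int main(void)
      {
              unsigned long long counts[NR_CPUS], node[NR_NODES] = { 0 };
              int cpu;

              for (cpu = 0; cpu < NR_CPUS; cpu++)
                      counts[cpu] = 1000 + cpu;       /* stand-in counter reads */

              for (cpu = 0; cpu < NR_CPUS; cpu++)
                      node[cpu / 20] += counts[cpu];  /* cpu -> node mapping */

              for (cpu = 0; cpu < NR_NODES; cpu++)
                      printf("N%d %llu\n", cpu, node[cpu]);
              return 0;
      }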

perf probe:

  Masami Hiramatsu:

  Various fixes related to recent additions to the DWARF format:

  - Fix to find range-only function instance

  - Walk function lines in lexical blocks

  - Fix to show function entry line as probe-able

  - Fix wrong address verification

  - Fix to probe a function which has no entry pc

  - Fix to probe an inline function which has no entry pc

  - Fix to list probe event with correct line number

  - Fix to show inlined function callsite without entry_pc

  - Fix to show ranges of variables in functions without entry_pc

  - Return a better scope DIE if there is no best scope

  - Skip end-of-sequence and non-statement lines

  - Filter out instances except for inlined subroutine and subprogram

  - Fix to show calling lines of inlined functions

  - Skip overlapped location on searching variables

perf inject:

  Adrian Hunter:

  - Do not strip evsels with --strip, as they are needed for create_gcov
    (see the autofdo example in tools/perf/Documentation/intel-pt.txt).

Intel PT:

  Adrian Hunter:

  - Intel PT uses an auxtrace_cache to store the results of code-walking, to avoid
    repeated decoding. Add auxtrace_cache__remove() to handle text poke events.
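
    A toy sketch of why removal is needed (self-contained; it shows the
    pattern, not the auxtrace_cache implementation): once the kernel pokes
    the text at an address, any decode result cached for it must be dropped
    so that the next lookup re-walks the modified code.

      #include <stdio.h>

      #define NBUCKETS 64

      struct entry { unsigned int key; int valid; /* decoded data ... */ };

      static struct entry cache[NBUCKETS];

      static void cache_add(unsigned int key)
      {
              cache[key % NBUCKETS] = (struct entry){ .key = key, .valid = 1 };
      }

      static int cache_lookup(unsigned int key)
      {
              struct entry *e = &cache[key % NBUCKETS];

              return e->valid && e->key == key;
      }

      static void cache_remove(unsigned int key)
      {
              struct entry *e = &cache[key % NBUCKETS];

              if (e->valid && e->key == key)
                      e->valid = 0; /* force a fresh code walk next time */
      }

      int main(void)
      {
              cache_add(0x1234);
              cache_remove(0x1234); /* a text poke hit this address */
              printf("still cached? %d\n", cache_lookup(0x1234)); /* 0 */
              return 0;
      }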

core:

  Andi Kleen:

  - Always preserve errno while cleaning up perf_event_open failures.
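
    The pattern being enforced, as a self-contained sketch (not the perf code;
    the point is that cleanup syscalls can overwrite errno before the caller
    gets to report the original failure):

      #include <errno.h>
      #include <stdio.h>
      #include <string.h>
      #include <unistd.h>

      static int open_event(void)
      {
              int fd = -1;    /* imagine: fd = perf_event_open(...); */

              errno = EACCES; /* pretend that is how it just failed */

              if (fd < 0) {
                      int old_errno = errno;  /* save before cleaning up */

                      close(-1);              /* cleanup clobbers errno */
                      errno = old_errno;      /* restore the real reason */
                      return -1;
              }
              return fd;
      }

      int main(void)
      {
              if (open_event() < 0)
                      printf("failed: %s\n", strerror(errno)); /* EACCES */
              return 0;
      }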

llvm:

  Arnaldo Carvalho de Melo:

  - No need to announce that the request to save a .o file for BPF events, as
    expressed in ~/.perfconfig, was satisfied; make that a debug message.

perf vendor events:

Intel:

  Haiyan Song:

  - Update CascadelakeX events to v1.05.

  - Update all the Intel JSON metrics from TMAM 3.6.

Treewide:

  Ian Rogers:

  - Improve error paths, plugging leaks found using LLVM tools
    such as libFuzzer.

jevents:

  Yunfeng Ye:

  - Fix resource leak in process_mapfile() and main()
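
    The usual shape of such a fix, sketched (self-contained and illustrative,
    not necessarily the exact jevents patch): every early exit funnels through
    one label that releases whatever was acquired.

      #include <stdio.h>
      #include <stdlib.h>

      static int process_mapfile_like(const char *path)
      {
              FILE *f = fopen(path, "r");
              char *line;
              int ret = -1;

              if (!f)
                      return -1;

              line = malloc(256);
              if (!line)
                      goto out;       /* without the label, f would leak here */

              /* ... parse mapfile lines ... */
              ret = 0;
      out:
              free(line);
              fclose(f);
              return ret;
      }

      int main(void)
      {
              return process_mapfile_like("/etc/hostname") ? 1 : 0;
      }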

perf kvm:

  Igor Lubashev:

  - Use the evlist layer API when possible.

libsubcmd:

  James Clark:

  - Move EXTRA_FLAGS to the end to allow overriding existing flags.

  - Use -O0 with DEBUG=1

perf diff:

  Jin Yao:

  - Don't use a hack to skip column length calculation

CoreSight ETM:

  Leo Yan:

  - Fix definition of macro TO_CS_QUEUE_NR

ARM64:

  John Garry:

  - Do not try to include libelf header files when its feature detection
    failed, fixing the cross build for ARM64.

perf tests:

  Leo Yan:

  - Fix out of bounds memory access in the backward ring buffer test.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 56b2147f34 (Ingo Molnar, 2019-11-12 12:06:08 +01:00)
87 changed files with 34182 additions and 31490 deletions

@@ -19,8 +19,7 @@ MAKEFLAGS += --no-print-directory
LIBFILE = $(OUTPUT)libsubcmd.a
CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
CFLAGS := -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
ifeq ($(DEBUG),0)
ifeq ($(feature-fortify-source), 1)
@@ -28,7 +27,9 @@ ifeq ($(DEBUG),0)
endif
endif
ifeq ($(CC_NO_CLANG), 0)
ifeq ($(DEBUG),1)
CFLAGS += -O0
else ifeq ($(CC_NO_CLANG), 0)
CFLAGS += -O3
else
CFLAGS += -O6
@@ -43,6 +44,8 @@ CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
CFLAGS += -I$(srctree)/tools/include/
CFLAGS += $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
all:

@@ -571,6 +571,13 @@ config terms. For example: 'cycles/overwrite/' and 'instructions/no-overwrite/'.
Implies --tail-synthesize.
--kcore::
Make a copy of /proc/kcore and place it into a directory with the perf data file.
--max-size=<size>::
Limit the sample data max size, <size> is expected to be a number with
appended unit character - B/K/M/G
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]

@@ -525,6 +525,17 @@ include::itrace.txt[]
Configure time quantum for time sort key. Default 100ms.
Accepts s, us, ms, ns units.
--total-cycles::
When --total-cycles is specified, it supports sorting for all blocks by
'Sampled Cycles%'. This is useful to concentrate on the globally hottest
blocks. In output, there are some new columns:
'Sampled Cycles%' - block sampled cycles aggregation / total sampled cycles
'Sampled Cycles' - block sampled cycles aggregation
'Avg Cycles%' - block average sampled cycles / sum of total block average
sampled cycles
'Avg Cycles' - block average sampled cycles
include::callchain-overhead-calculation.txt[]
SEE ALSO

@@ -217,6 +217,11 @@ core number and the number of online logical processors on that physical process
Aggregate counts per monitored threads, when monitoring threads (-t option)
or processes (-p option).
--per-node::
Aggregate counts per NUMA nodes for system-wide mode measurements. This
is a useful mode to detect imbalance between NUMA nodes. To enable this
mode, use --per-node in addition to -a. (system-wide).
-D msecs::
--delay msecs::
After starting the program, wait msecs before measuring. This is useful to

@@ -0,0 +1,63 @@
perf.data directory format
DISCLAIMER This is not ABI yet and is subject to possible change
in following versions of perf. We will remove this
disclaimer once the directory format soaks in.
This document describes the on-disk perf.data directory format.
The layout is described by HEADER_DIR_FORMAT feature.
Currently it holds only version number (0):
HEADER_DIR_FORMAT = 24
struct {
uint64_t version;
}
The current only version value 0 means that:
- there is a single perf.data file named 'data' within the directory.
e.g.
$ tree -ps perf.data
perf.data
└── [-rw------- 25912] data
Future versions are expected to describe different data files
layout according to special needs.
Currently the only 'perf record' option to output to a directory is
the --kcore option which puts a copy of /proc/kcore into the directory.
e.g.
$ sudo perf record --kcore uname
Linux
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.015 MB perf.data (9 samples) ]
$ sudo tree -ps perf.data
perf.data
├── [-rw------- 23744] data
└── [drwx------ 4096] kcore_dir
├── [-r-------- 6731125] kallsyms
├── [-r-------- 40230912] kcore
└── [-r-------- 5419] modules
1 directory, 4 files
$ sudo perf script -v
build id event received for vmlinux: 1eaa285996affce2d74d8e66dcea09a80c9941de
build id event received for [vdso]: 8bbaf5dc62a9b644b4d4e4539737e104e4a84541
build id event received for /lib/x86_64-linux-gnu/libc-2.28.so: 5b157f49586a3ca84d55837f97ff466767dd3445
Samples for 'cycles' event do not have CPU attribute set. Skipping 'cpu' field.
Using CPUID GenuineIntel-6-8E-A
Using perf.data/kcore_dir/kcore for kernel data
Using perf.data/kcore_dir/kallsyms for symbols
perf 15316 2060795.480902: 1 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
perf 15316 2060795.480906: 1 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
perf 15316 2060795.480908: 7 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
perf 15316 2060795.480910: 119 cycles: ffffffffa2caa54a native_write_msr+0xa (vmlinux)
perf 15316 2060795.480912: 2109 cycles: ffffffffa2c9b7b0 native_apic_msr_write+0x0 (vmlinux)
perf 15316 2060795.480914: 37606 cycles: ffffffffa2f121fe perf_event_addr_filters_exec+0x2e (vmlinux)
uname 15316 2060795.480924: 588287 cycles: ffffffffa303a56d page_counter_try_charge+0x6d (vmlinux)
uname 15316 2060795.481067: 2261945 cycles: ffffffffa301438f kmem_cache_free+0x4f (vmlinux)
uname 15316 2060795.481643: 2172167 cycles: 7f1a48c393c0 _IO_un_link+0x0 (/lib/x86_64-linux-gnu/libc-2.28.so)

@@ -6,9 +6,10 @@
#include "symbol.h" // for the elf__needs_adjust_symbols() prototype
#include <stdbool.h>
#include <gelf.h>
#ifdef HAVE_LIBELF_SUPPORT
#include <gelf.h>
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
return ehdr.e_type == ET_EXEC ||

@@ -29,7 +29,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
return -1;
}
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
maps__for_each_entry(maps, pos) {
struct kmap *kmap;
size_t size;

@@ -201,7 +201,7 @@ static int process_branch_callback(struct evsel *evsel,
if (a.map != NULL)
a.map->dso->hit = 1;
hist__account_cycles(sample->branch_stack, al, sample, false);
hist__account_cycles(sample->branch_stack, al, sample, false, NULL);
ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
return ret;

@@ -24,6 +24,7 @@
#include "util/annotate.h"
#include "util/map.h"
#include "util/spark.h"
#include "util/block-info.h"
#include <linux/err.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
@@ -98,8 +99,6 @@ static s64 compute_wdiff_w2;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct addr_location dummy_al;
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
@@ -427,7 +426,8 @@ static int diff__process_sample_event(struct perf_tool *tool,
goto out_put;
}
hist__account_cycles(sample->branch_stack, &al, sample, false);
hist__account_cycles(sample->branch_stack, &al, sample, false,
NULL);
}
/*
@@ -537,41 +537,6 @@ static void hists__baseline_only(struct hists *hists)
}
}
static int64_t block_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct block_info *bi_l = left->block_info;
struct block_info *bi_r = right->block_info;
int cmp;
if (!bi_l->sym || !bi_r->sym) {
if (!bi_l->sym && !bi_r->sym)
return 0;
else if (!bi_l->sym)
return -1;
else
return 1;
}
if (bi_l->sym == bi_r->sym) {
if (bi_l->start == bi_r->start) {
if (bi_l->end == bi_r->end)
return 0;
else
return (int64_t)(bi_r->end - bi_l->end);
} else
return (int64_t)(bi_r->start - bi_l->start);
} else {
cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
return cmp;
}
if (bi_l->sym->start != bi_r->sym->start)
return (int64_t)(bi_r->sym->start - bi_l->sym->start);
return (int64_t)(bi_r->sym->end - bi_l->sym->end);
}
static int64_t block_cycles_diff_cmp(struct hist_entry *left,
struct hist_entry *right)
{
@@ -600,67 +565,13 @@ static void init_block_hist(struct block_hist *bh)
INIT_LIST_HEAD(&bh->block_fmt.list);
INIT_LIST_HEAD(&bh->block_fmt.sort_list);
bh->block_fmt.cmp = block_cmp;
bh->block_fmt.cmp = block_info__cmp;
bh->block_fmt.sort = block_sort;
perf_hpp_list__register_sort_field(&bh->block_list,
&bh->block_fmt);
bh->valid = true;
}
static void init_block_info(struct block_info *bi, struct symbol *sym,
struct cyc_hist *ch, int offset)
{
bi->sym = sym;
bi->start = ch->start;
bi->end = offset;
bi->cycles = ch->cycles;
bi->cycles_aggr = ch->cycles_aggr;
bi->num = ch->num;
bi->num_aggr = ch->num_aggr;
memcpy(bi->cycles_spark, ch->cycles_spark,
NUM_SPARKS * sizeof(u64));
}
static int process_block_per_sym(struct hist_entry *he)
{
struct annotation *notes;
struct cyc_hist *ch;
struct block_hist *bh;
if (!he->ms.map || !he->ms.sym)
return 0;
notes = symbol__annotation(he->ms.sym);
if (!notes || !notes->src || !notes->src->cycles_hist)
return 0;
bh = container_of(he, struct block_hist, he);
init_block_hist(bh);
ch = notes->src->cycles_hist;
for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
if (ch[i].num_aggr) {
struct block_info *bi;
struct hist_entry *he_block;
bi = block_info__new();
if (!bi)
return -1;
init_block_info(bi, he->ms.sym, &ch[i], i);
he_block = hists__add_entry_block(&bh->block_hists,
&dummy_al, bi);
if (!he_block) {
block_info__put(bi);
return -1;
}
}
}
return 0;
}
static int block_pair_cmp(struct hist_entry *a, struct hist_entry *b)
{
struct block_info *bi_a = a->block_info;
@@ -765,13 +676,6 @@ static void block_hists_match(struct hists *hists_base,
}
}
static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
/* Skip the calculation of column length in output_resort */
he->filtered = true;
return 0;
}
static void hists__precompute(struct hists *hists)
{
struct rb_root_cached *root;
@@ -792,8 +696,11 @@ static void hists__precompute(struct hists *hists)
he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (compute == COMPUTE_CYCLES)
process_block_per_sym(he);
if (compute == COMPUTE_CYCLES) {
bh = container_of(he, struct block_hist, he);
init_block_hist(bh);
block_info__process_sym(he, bh, NULL, 0);
}
data__for_each_file_new(i, d) {
pair = get_pair_data(he, d);
@@ -812,16 +719,18 @@ static void hists__precompute(struct hists *hists)
compute_wdiff(he, pair);
break;
case COMPUTE_CYCLES:
process_block_per_sym(pair);
bh = container_of(he, struct block_hist, he);
pair_bh = container_of(pair, struct block_hist,
he);
init_block_hist(pair_bh);
block_info__process_sym(pair, pair_bh, NULL, 0);
bh = container_of(he, struct block_hist, he);
if (bh->valid && pair_bh->valid) {
block_hists_match(&bh->block_hists,
&pair_bh->block_hists);
hists__output_resort_cb(&pair_bh->block_hists,
NULL, filter_cb);
hists__output_resort(&pair_bh->block_hists,
NULL);
}
break;
default:

@@ -578,58 +578,6 @@ static void strip_init(struct perf_inject *inject)
evsel->handler = drop_sample;
}
static bool has_tracking(struct evsel *evsel)
{
return evsel->core.attr.mmap || evsel->core.attr.mmap2 || evsel->core.attr.comm ||
evsel->core.attr.task;
}
#define COMPAT_MASK (PERF_SAMPLE_ID | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
/*
* In order that the perf.data file is parsable, tracking events like MMAP need
* their selected event to exist, except if there is only 1 selected event left
* and it has a compatible sample type.
*/
static bool ok_to_remove(struct evlist *evlist,
struct evsel *evsel_to_remove)
{
struct evsel *evsel;
int cnt = 0;
bool ok = false;
if (!has_tracking(evsel_to_remove))
return true;
evlist__for_each_entry(evlist, evsel) {
if (evsel->handler != drop_sample) {
cnt += 1;
if ((evsel->core.attr.sample_type & COMPAT_MASK) ==
(evsel_to_remove->core.attr.sample_type & COMPAT_MASK))
ok = true;
}
}
return ok && cnt == 1;
}
static void strip_fini(struct perf_inject *inject)
{
struct evlist *evlist = inject->session->evlist;
struct evsel *evsel, *tmp;
/* Remove non-synthesized evsels if possible */
evlist__for_each_entry_safe(evlist, tmp, evsel) {
if (evsel->handler == drop_sample &&
ok_to_remove(evlist, evsel)) {
pr_debug("Deleting %s\n", perf_evsel__name(evsel));
evlist__remove(evlist, evsel);
evsel__delete(evsel);
}
}
}
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
@@ -729,8 +677,6 @@ static int __cmd_inject(struct perf_inject *inject)
evlist__remove(session->evlist, evsel);
evsel__delete(evsel);
}
if (inject->strip)
strip_fini(inject);
}
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;

@@ -998,7 +998,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
done = perf_kvm__handle_stdin();
if (!rc && !done)
err = fdarray__poll(fda, 100);
err = evlist__poll(kvm->evlist, 100);
}
evlist__disable(kvm->evlist);

@@ -55,6 +55,9 @@
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
@@ -91,8 +94,11 @@ struct record {
struct switch_output switch_output;
unsigned long long samples;
cpu_set_t affinity_mask;
unsigned long output_max_size; /* = 0: unlimited */
};
static volatile int done;
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
@@ -120,6 +126,12 @@ static bool switch_output_time(struct record *rec)
trigger_is_ready(&switch_output_trigger);
}
static bool record__output_max_size_exceeded(struct record *rec)
{
return rec->output_max_size &&
(rec->bytes_written >= rec->output_max_size);
}
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
void *bf, size_t size)
{
@@ -132,6 +144,13 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
rec->bytes_written += size;
if (record__output_max_size_exceeded(rec) && !done) {
fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
" stopping session ]\n",
rec->bytes_written >> 10);
done = 1;
}
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
@@ -496,7 +515,6 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
return record__write(rec, map, bf, size);
}
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
@@ -537,7 +555,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
size_t padding;
u8 pad[8] = {0};
if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
off_t file_offset;
int fd = perf_data__fd(data);
int err;
@@ -699,6 +717,37 @@ static int record__auxtrace_init(struct record *rec __maybe_unused)
#endif
static bool record__kcore_readable(struct machine *machine)
{
char kcore[PATH_MAX];
int fd;
scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
fd = open(kcore, O_RDONLY);
if (fd < 0)
return false;
close(fd);
return true;
}
static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
char from_dir[PATH_MAX];
char kcore_dir[PATH_MAX];
int ret;
snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
if (ret)
return ret;
return kcore_copy(from_dir, kcore_dir);
}
static int record__mmap_evlist(struct record *rec,
struct evlist *evlist)
{
@@ -1383,6 +1432,12 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
session->header.env.comp_type = PERF_COMP_ZSTD;
session->header.env.comp_level = rec->opts.comp_level;
if (rec->opts.kcore &&
!record__kcore_readable(&session->machines.host)) {
pr_err("ERROR: kcore is not readable.\n");
return -1;
}
record__init_features(rec);
if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
@@ -1414,6 +1469,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
if (rec->opts.kcore) {
err = record__kcore_copy(&session->machines.host, data);
if (err) {
pr_err("ERROR: Failed to copy kcore\n");
goto out_child;
}
}
err = bpf__apply_obj_config();
if (err) {
char errbuf[BUFSIZ];
@@ -1936,6 +1999,33 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
return 0;
}
static int parse_output_max_size(const struct option *opt,
const char *str, int unset)
{
unsigned long *s = (unsigned long *)opt->value;
static struct parse_tag tags_size[] = {
{ .tag = 'B', .mult = 1 },
{ .tag = 'K', .mult = 1 << 10 },
{ .tag = 'M', .mult = 1 << 20 },
{ .tag = 'G', .mult = 1 << 30 },
{ .tag = 0 },
};
unsigned long val;
if (unset) {
*s = 0;
return 0;
}
val = parse_tag_value(str, tags_size);
if (val != (unsigned long) -1) {
*s = val;
return 0;
}
return -1;
}
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
@@ -2184,6 +2274,7 @@ static struct option __record_options[] = {
parse_cgroups),
OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
"ms to wait before starting measurement after program start"),
OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
"user to profile"),
@@ -2262,6 +2353,8 @@ static struct option __record_options[] = {
"n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
record__parse_comp_level),
#endif
OPT_CALLBACK(0, "max-size", &record.output_max_size,
"size", "Limit the maximum size of the output file", parse_output_max_size),
OPT_END()
};
@@ -2322,6 +2415,9 @@ int cmd_record(int argc, const char **argv)
}
if (rec->opts.kcore)
rec->data.is_dir = true;
if (rec->opts.comp_level != 0) {
pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
rec->no_buildid = true;

@@ -51,6 +51,7 @@
#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
#include "util/block-info.h"
#include <dlfcn.h>
#include <errno.h>
@@ -96,10 +97,13 @@ struct report {
float min_percent;
u64 nr_entries;
u64 queue_size;
u64 total_cycles;
int socket_filter;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
struct branch_type_stat brtype_stat;
bool symbol_ipc;
bool total_cycles_mode;
struct block_report *block_reports;
};
static int report__config(const char *var, const char *value, void *cb)
@@ -290,9 +294,10 @@ static int process_sample_event(struct perf_tool *tool,
if (al.map != NULL)
al.map->dso->hit = 1;
if (ui__has_annotation() || rep->symbol_ipc) {
if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
hist__account_cycles(sample->branch_stack, &al, sample,
rep->nonany_branch_mode);
rep->nonany_branch_mode,
&rep->total_cycles);
}
ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
@@ -480,11 +485,28 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
return ret + fprintf(fp, "\n#\n");
}
static int perf_evlist__tui_block_hists_browse(struct evlist *evlist,
struct report *rep)
{
struct evsel *pos;
int i = 0, ret;
evlist__for_each_entry(evlist, pos) {
ret = report__browse_block_hists(&rep->block_reports[i++].hist,
rep->min_percent, pos);
if (ret != 0)
return ret;
}
return 0;
}
static int perf_evlist__tty_browse_hists(struct evlist *evlist,
struct report *rep,
const char *help)
{
struct evsel *pos;
int i = 0;
if (!quiet) {
fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
@@ -500,6 +522,13 @@ static int perf_evlist__tty_browse_hists(struct evlist *evlist,
continue;
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
if (rep->total_cycles_mode) {
report__browse_block_hists(&rep->block_reports[i++].hist,
rep->min_percent, pos);
continue;
}
hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
!(symbol_conf.use_callchain ||
symbol_conf.show_branchflag_count));
@@ -582,6 +611,11 @@ static int report__browse_hists(struct report *rep)
switch (use_browser) {
case 1:
if (rep->total_cycles_mode) {
ret = perf_evlist__tui_block_hists_browse(evlist, rep);
break;
}
ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
rep->min_percent,
&session->header.env,
@@ -727,11 +761,9 @@ static struct task *tasks_list(struct task *task, struct machine *machine)
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{
size_t printed = 0;
struct rb_node *nd;
for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
struct map *map;
maps__for_each_entry(maps, map) {
printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
indent, "", map->start, map->end,
map->prot & PROT_READ ? 'r' : '-',
@@ -927,6 +959,13 @@ static int __cmd_report(struct report *rep)
report__output_resort(rep);
if (rep->total_cycles_mode) {
rep->block_reports = block_info__create_report(session->evlist,
rep->total_cycles);
if (!rep->block_reports)
return -1;
}
return report__browse_hists(rep);
}
@@ -1211,6 +1250,8 @@ int cmd_report(int argc, const char **argv)
"Set time quantum for time sort key (default 100ms)",
parse_time_quantum),
OPTS_EVSWITCH(&report.evswitch),
OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
"Sort all blocks by 'Sampled Cycles%'"),
OPT_END()
};
struct perf_data data = {
@@ -1373,6 +1414,13 @@ int cmd_report(int argc, const char **argv)
goto error;
}
if (report.total_cycles_mode) {
if (sort__mode != SORT_MODE__BRANCH)
report.total_cycles_mode = false;
else
sort_order = "sym";
}
if (strcmp(input_name, "-") != 0)
setup_browser(true);
else
@@ -1423,7 +1471,8 @@ int cmd_report(int argc, const char **argv)
* so don't allocate extra space that won't be used in the stdio
* implementation.
*/
if (ui__has_annotation() || report.symbol_ipc) {
if (ui__has_annotation() || report.symbol_ipc ||
report.total_cycles_mode) {
ret = symbol__annotation_init();
if (ret < 0)
goto error;
@@ -1484,6 +1533,10 @@ int cmd_report(int argc, const char **argv)
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
zfree(&report.ptime_range);
}
if (report.block_reports)
zfree(&report.block_reports);
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
return ret;

@@ -792,6 +792,8 @@ static struct option stat_options[] = {
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
"aggregate counts per thread", AGGR_THREAD),
OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
"aggregate counts per numa node", AGGR_NODE),
OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
"ms to wait before starting measurement after program start"),
OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
@@ -830,6 +832,12 @@ static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
return cpu_map__get_core(map, cpu, NULL);
}
static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
struct perf_cpu_map *map, int cpu)
{
return cpu_map__get_node(map, cpu, NULL);
}
static int perf_stat__get_aggr(struct perf_stat_config *config,
aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
{
@@ -864,6 +872,12 @@ static int perf_stat__get_core_cached(struct perf_stat_config *config,
return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
}
static int perf_stat__get_node_cached(struct perf_stat_config *config,
struct perf_cpu_map *map, int idx)
{
return perf_stat__get_aggr(config, perf_stat__get_node, map, idx);
}
static bool term_percore_set(void)
{
struct evsel *counter;
@@ -902,6 +916,13 @@ static int perf_stat_init_aggr_mode(void)
}
stat_config.aggr_get_id = perf_stat__get_core_cached;
break;
case AGGR_NODE:
if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_node_cached;
break;
case AGGR_NONE:
if (term_percore_set()) {
if (cpu_map__build_core_map(evsel_list->core.cpus,
@@ -1014,6 +1035,13 @@ static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
return core;
}
static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data)
{
int cpu = perf_env__get_cpu(data, map, idx);
return perf_env__numa_node(data, cpu);
}
static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
struct perf_cpu_map **sockp)
{
@@ -1032,6 +1060,12 @@ static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *c
return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}
static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus,
struct perf_cpu_map **nodep)
{
return cpu_map__build_map(cpus, nodep, perf_env__get_node, env);
}
static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu_map *map, int idx)
{
@@ -1049,6 +1083,12 @@ static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unus
return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}
static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu_map *map, int idx)
{
return perf_env__get_node(map, idx, &perf_stat.session->header.env);
}
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
struct perf_env *env = &st->session->header.env;
@@ -1075,6 +1115,13 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
}
stat_config.aggr_get_id = perf_stat__get_core_file;
break;
case AGGR_NODE:
if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_node_file;
break;
case AGGR_NONE:
case AGGR_GLOBAL:
case AGGR_THREAD:
@@ -1622,6 +1669,8 @@ static int __cmd_report(int argc, const char **argv)
"aggregate counts per processor die", AGGR_DIE),
OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
"aggregate counts per numa node", AGGR_NODE),
OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_END()
@@ -1896,6 +1945,9 @@ int cmd_stat(int argc, const char **argv)
}
}
if (stat_config.aggr_mode == AGGR_NODE)
cpu__setup_cpunode_map();
if (stat_config.times && interval)
interval_count = true;
else if (stat_config.times && !interval) {

@@ -725,7 +725,8 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
NULL);
return 0;
}

@@ -120,7 +120,8 @@ void perf_evsel__close_fd(struct perf_evsel *evsel)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
close(FD(evsel, cpu, thread));
if (FD(evsel, cpu, thread) >= 0)
close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1;
}
}

@@ -1,352 +1,352 @@
[
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound"
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT"
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation"
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT"
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound"
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT"
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring"
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT"
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
"MetricGroup": "Pipeline;Retiring",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
"MetricGroup": "Branches;PGO",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricGroup": "PGO",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricGroup": "DSB;Frontend_Bandwidth",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
"BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
"MetricGroup": "Instruction_Type;L1_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
"BriefDescription": "Instructions per Store",
"MetricGroup": "Instruction_Type;Store_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"BriefDescription": "Instructions per Branch",
"MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts",
"MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts_SMT",
"MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricGroup": "Branch_Mispredicts",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
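
The arrays in this section are the per-CPU-model metric files from tools/perf/pmu-events/arch/x86/. Each MetricExpr is expanded by 'perf stat' into its component events at run time, so the metrics defined above can be exercised directly by name. A minimal sketch, assuming a perf binary built from this tree (output omitted):

    # perf list metricgroup
    # perf stat -M IPC,CPI -a -- sleep 1
    # perf stat -M DRAM_BW_Use -a -- sleep 10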

[
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound"
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT"
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation"
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT"
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound"
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT"
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring"
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT"
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
"MetricGroup": "Pipeline;Retiring",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
"MetricGroup": "Branches;PGO",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricGroup": "PGO",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricGroup": "DSB;Frontend_Bandwidth",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
"BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
"MetricGroup": "Instruction_Type;L1_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
"BriefDescription": "Instructions per Store",
"MetricGroup": "Instruction_Type;Store_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"BriefDescription": "Instructions per Branch",
"MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts",
"MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts_SMT",
"MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricGroup": "Branch_Mispredicts",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
"MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
"MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
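
This second file covers a server part: DRAM_Read_Latency, DRAM_Parallel_Reads and Socket_CLKS are built from uncore CBox events (the cbox@...@ terms), which perf's event parser resolves against the uncore PMUs in sysfs, so they are only meaningful with system-wide collection (-a). As a sketch, Socket_CLKS amounts to counting the CBox clockticks event directly; the PMU name uncore_cbox_0 below is an assumption and varies across systems:

    # perf stat -a -e uncore_cbox_0/event=0x0/ -- sleep 1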

[
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound"
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT"
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation"
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT"
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound"
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT"
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring"
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT"
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
"MetricGroup": "Pipeline;Retiring",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
"MetricGroup": "Branches;PGO",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricGroup": "PGO",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricGroup": "DSB;Frontend_Bandwidth",
"MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
"BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
"MetricGroup": "Instruction_Type;L1_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
"BriefDescription": "Instructions per Store",
"MetricGroup": "Instruction_Type;Store_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"BriefDescription": "Instructions per Branch",
"MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts",
"MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts_SMT",
"MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricGroup": "Branch_Mispredicts",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Access_BW"
},
{
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
"MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
"MetricGroup": "",
"MetricName": "L2_Evictions_Silent_PKI"
},
{
"BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
"MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
"MetricGroup": "",
"MetricName": "L2_Evictions_NonSilent_PKI"
},
{
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x35\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1\\\\\\,config\\=0x40433@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
"MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 1 == 1 else 0 else 0",
"BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
"MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
"MetricGroup": "Memory_Lat",
"MetricName": "MEM_PMM_Read_Latency"
},
{
"MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
"BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
"MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
"MetricGroup": "Memory_BW",
"MetricName": "PMM_Read_BW"
},
{
"MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
"BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
"MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
"MetricGroup": "Memory_BW",
"MetricName": "PMM_Write_BW"
},
{
"MetricExpr": "cha_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cha_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
"MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
"MetricGroup": "",
"MetricName": "IpFarBranch"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
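
The MetricName strings in the listing above are what 'perf stat -M' accepts,
and a MetricGroup value such as TopdownL1 can likewise be passed to -M to
enable the whole group at once. An illustrative session (the workload name is
a placeholder and counter output is omitted):

  # perf stat -M TopdownL1 -a -- sleep 1
  # perf stat -M IPC,CPI -- ./my_workload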

[
{
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
"BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"UMask": "0x40"
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
{
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
"UMask": "0x80"
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
"PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"UMask": "0x1e"
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"UMask": "0x4"
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"UMask": "0x10"
}
]
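
These FP_ARITH_INST_RETIRED events are the inputs that the FLOPc/GFLOPs
metrics above weight by element count: scalar counts carry weight 1, 128-bit
packed double 2, 128-bit packed single and 256-bit packed double 4, 256-bit
packed single and 512-bit packed double 8, and 512-bit packed single 16. Per
the descriptions, a 256-bit packed double FMA counts twice and each count
represents 4 operations, so a single such instruction contributes 8 FLOPs.
The events can also be counted directly; the event names below are the
lowercase aliases perf derives from these files, and the workload name is a
placeholder:

  # perf stat -e fp_arith_inst_retired.scalar_double \
              -e fp_arith_inst_retired.256b_packed_double ./fp_kernel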

[
{
"EventCode": "0x79",
"UMask": "0x4",
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_CYCLES",
"CounterMask": "1",
"PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x4",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x8",
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
"EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
"PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x8",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x10",
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_CYCLES",
"CounterMask": "1",
"PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x18",
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"CounterMask": "1",
"PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x18",
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
"UMask": "0x1"
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"EventCode": "0x79",
"UMask": "0x20",
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
"UMask": "0x20"
},
{
"EventCode": "0x79",
"UMask": "0x24",
"BriefDescription": "Cycles MITE is delivering any Uop",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x200206",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
"MSRValue": "0x300206",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
"PEBS": "1",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
"Invert": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
"UMask": "0x1"
},
{
"EventCode": "0x79",
"UMask": "0x24",
"BriefDescription": "Cycles MITE is delivering 4 Uops",
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"CounterMask": "4",
"PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x30",
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_CYCLES",
"CounterMask": "1",
"PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x30",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
"UMask": "0x30"
},
{
"EdgeDetect": "1",
"EventCode": "0x79",
"UMask": "0x30",
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_SWITCHES",
"CounterMask": "1",
"PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"EventCode": "0x80",
"UMask": "0x4",
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"Counter": "0,1,2,3",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x83",
"UMask": "0x1",
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"Counter": "0,1,2,3",
"EventName": "ICACHE_64B.IFTAG_HIT",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x83",
"UMask": "0x2",
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"Counter": "0,1,2,3",
"EventName": "ICACHE_64B.IFTAG_MISS",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x83",
"UMask": "0x4",
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"Counter": "0,1,2,3",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
"UMask": "0x1"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"CounterMask": "2",
"PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x8",
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
"EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
"PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"CounterMask": "3",
"PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
"PEBS": "1",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"CounterMask": "4",
"PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core = 4.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x4",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
"PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
"UMask": "0x30"
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
"PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x4"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread, adding 4 - x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xAB",
"UMask": "0x2",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. DSB-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs. Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0-2 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x400406",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x200206",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x400206",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"PEBS": "1",
"MSRValue": "0x15",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. ",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
"PEBS": "1",
"MSRValue": "0x14",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
"PEBS": "1",
"MSRValue": "0x13",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
"PEBS": "1",
"MSRValue": "0x12",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
"PEBS": "1",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. ",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x300206",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x420006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x410006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x408006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x404006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x400806",
"PEBS": "1",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x400206",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x400406",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
"PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x408006",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x404006",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
"PEBS": "1",
"PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x410006",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
"SampleAfterValue": "2000003",
"UMask": "0x4"
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"UMask": "0x24"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x401006",
"PEBS": "1",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x402006",
"PEBS": "1",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_SWITCHES",
"PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"SampleAfterValue": "2000003",
"UMask": "0x30"
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
"PEBS": "1",
"PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x420006",
"PEBS": "1",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"UMask": "0x24"
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
"UMask": "0x4"
}
]
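
A quick note on usage: the FRONTEND_RETIRED.* entries above are precise (PEBS) events selected via MSR 0x3F7 and marked TakenAlone, so they are best sampled individually. A minimal sketch, assuming the JSON above is installed so perf can resolve the alias ('./workload' is just a placeholder):

      # perf record -e frontend_retired.latency_ge_32 ./workload
      # perf report --stdio

Per the table above, this alias programs event 0xC6, umask 0x1, with MSR 0x3F7 set to 0x402006.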

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -73,6 +73,22 @@
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M_PMM_RPQ_INSERTS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xE7",
"EventName": "UNC_M_PMM_WPQ_INSERTS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec). Derived from unc_m_pmm_rpq_inserts",
"Counter": "0,1,2,3",
@@ -102,6 +118,15 @@
"ScaleUnit": "6.103515625E-5MB/sec",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
"Counter": "0,1,2,3",
@@ -113,5 +138,171 @@
"ScaleUnit": "6000000000ns",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Page Activate commands sent due to a write request",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.WR",
"PerPkg": "1",
"PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM CAS Commands issued",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
"PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"UMask": "0xF",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
"PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Underfill Read CAS Commands issued",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
"PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request).",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
"PublicDescription": "Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "All commands for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.ALL",
"PerPkg": "1",
"PublicDescription": "All commands for Intel Optane DC persistent memory",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Regular reads(RPQ) commands for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.RD",
"PerPkg": "1",
"PublicDescription": "All Reads - RPQ or Ufill",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Underfill read commands for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.UFILL_RD",
"PerPkg": "1",
"PublicDescription": "Underfill reads",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Write commands for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.WR",
"PerPkg": "1",
"PublicDescription": "Writes",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of read requests allocated into the Read Pending Queue (RPQ). This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. The requests deallocate after the read CAS command has been issued to DRAM. This event counts both Isochronous and non-Isochronous requests which were issued to the RPQ.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries in the Read Pending Queue (RPQ) at each cycle. This can then be used to calculate both the average occupancy of the queue (in conjunction with the number of cycles not empty) and the average latency in the queue (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate from the RPQ after the CAS command has been issued to memory.",
"Unit": "iMC"
},
{
"BriefDescription": "All hits to Near Memory(DRAM cache) in Memory Mode",
"Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.HIT",
"PerPkg": "1",
"PublicDescription": "Tag Check; Hit",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "All Clean line misses to Near Memory(DRAM cache) in Memory Mode",
"Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_CLEAN",
"PerPkg": "1",
"PublicDescription": "Tag Check; Clean",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "All dirty line misses to Near Memory(DRAM cache) in Memory Mode",
"Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_DIRTY",
"PerPkg": "1",
"PublicDescription": "Tag Check; Dirty",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of writes requests allocated into the Write Pending Queue (WPQ). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (Memory Controller). The write requests deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts???",
"Unit": "iMC"
}
]
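
The RPQ/WPQ occupancy and insert pairs above combine via Little's law: average queue residency is summed occupancy divided by inserts. A minimal sketch of deriving PMM read latency by hand, assuming the uncore aliases above are available on the target machine (counts are illustrative):

      # perf stat -a -e unc_m_pmm_rpq_occupancy.all,unc_m_pmm_rpq_inserts sleep 10

Dividing the first count by the second gives the average read residency in iMC clocks, which the derived read-latency entry above expresses in nanoseconds via its ScaleUnit.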

File diff suppressed because it is too large

@@ -1,285 +1,284 @@
[
{
"EventCode": "0x08",
"UMask": "0x1",
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x2",
"BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x4",
"BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x8",
"BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0xe",
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
"CounterMask": "1",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x20",
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x2",
"BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x4",
"BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x8",
"BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0xe",
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"CounterMask": "1",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x20",
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x4F",
"UMask": "0x10",
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
"Counter": "0,1,2,3",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x85",
"UMask": "0x1",
"BriefDescription": "Misses at all ITLB levels that cause page walks",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x85",
"UMask": "0x2",
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x85",
"UMask": "0x4",
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x85",
"UMask": "0x8",
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x85",
"UMask": "0xe",
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0xe"
},
{
"EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
"MSRValue": "0x00",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
"CounterMask": "1",
"PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x85",
"UMask": "0x20",
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"EventCode": "0xAE",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
"Counter": "0,1,2,3",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xBD",
"UMask": "0x1",
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
"Counter": "0,1,2,3",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xBD",
"UMask": "0x20",
"BriefDescription": "STLB flush attempts",
"Counter": "0,1,2,3",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
"SampleAfterValue": "100003",
"UMask": "0x10"
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"UMask": "0x8"
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"UMask": "0x4"
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x4F",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
"UMask": "0x10"
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
"UMask": "0x20"
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0xe"
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"UMask": "0x2"
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0x2"
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0x8"
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
"UMask": "0x20"
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0x4"
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0xe"
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
"PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "100003",
"UMask": "0x10"
},
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
"UMask": "0x1"
}
]
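
The events above can be requested by (lower-cased) name on the command line; a minimal check, assuming a CPU that ships this event file:

    # perf stat -e dtlb_load_misses.stlb_hit,itlb_misses.stlb_hit -a sleep 1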

[
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound"
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT"
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation"
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT"
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound"
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT"
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring"
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT"
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
"MetricGroup": "Pipeline;Retiring",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
"MetricGroup": "Branches;PGO",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricGroup": "PGO",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricGroup": "DSB;Frontend_Bandwidth",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
"BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
"MetricGroup": "Instruction_Type;L1_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
"BriefDescription": "Instructions per Store",
"MetricGroup": "Instruction_Type;Store_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"BriefDescription": "Instructions per Branch",
"MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricGroup": "Branch_Mispredicts",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
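
The MetricName values above are what perf stat accepts with its -M option; a minimal invocation, assuming a CPU that matches this metrics file:

    # perf stat -M IPC,CPI -a sleep 1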

[
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound"
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT"
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation"
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT"
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound"
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT"
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring"
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT"
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
"MetricGroup": "Pipeline;Retiring",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
"MetricGroup": "Branches;PGO",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricGroup": "PGO",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricGroup": "DSB;Frontend_Bandwidth",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
"BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
"MetricGroup": "Instruction_Type;L1_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
"BriefDescription": "Instructions per Store",
"MetricGroup": "Instruction_Type;Store_Bound",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"BriefDescription": "Instructions per Branch",
"MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricGroup": "Branch_Mispredicts",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
"MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
"MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
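
   The metrics defined in these JSON files are consumed by 'perf stat' via
   its -M/--metrics option; a purely illustrative invocation (metric names
   taken from the definitions above, output elided):

     # perf stat -M Page_Walks_Utilization,L1MPKI -a -- sleep 1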

[
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"BriefDescription": "Instruction per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
}
]
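
   The metric and metric group names defined above can be enumerated with
   'perf list', and a whole group can be requested at once; illustrative
   commands (output elided, assuming a CPU these files apply to):

     # perf list metric
     # perf list metricgroup
     # perf stat -M Memory_BW -a -I 1000 -- sleep 5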

[
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"BriefDescription": "Instruction per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
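
  The metric definitions above are what 'perf stat -M' consumes; assuming one of
  the metric names defined in this file, a quick smoke test could look like this
  (command shown for illustration only, output elided):

     # perf stat -M DSB_Coverage -a sleep 1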


@@ -1,232 +1,232 @@
[
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound"
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT"
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation"
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT"
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound"
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT"
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring"
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT"
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
"MetricGroup": "Pipeline;Retiring",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricGroup": "PGO",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricGroup": "DSB;Frontend_Bandwidth",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
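
  Note that this update also renames several MetricGroup values (e.g.
  DSB;Frontend_Bandwidth -> DSB;Fetch_BW, Pipeline;Retiring -> Pipeline;Retire);
  the metric groups a given perf binary actually knows about can be
  double-checked with, e.g.:

     # perf list metricgroup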


@@ -1,226 +1,226 @@
[
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound"
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT"
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation"
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT"
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound"
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT"
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring"
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT"
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
"MetricGroup": "Pipeline;Retiring",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricGroup": "PGO",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricGroup": "DSB;Frontend_Bandwidth",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
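
   Once built into perf, the metrics above become selectable by name or by
   metric group. An illustrative invocation only (the metric names picked
   here are from this file; what is actually available depends on the CPU
   model the JSON targets):

     # perf list metricgroup
     # perf stat -M CLKS,Instructions -a -- sleep 1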

@ -1,364 +1,370 @@
[
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"BriefDescription": "Instruction per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Access_BW"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
"MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
"MetricGroup": "",
"MetricName": "IpFarBranch"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
}
]
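
   All the *_SMT flavors above share the same denominator: an estimate of
   clocks for the whole physical core while either sibling is active,
   rather than per-thread clocks. A rough worked example, with made-up
   counts: if CPU_CLK_UNHALTED.THREAD is 1000000 and exactly one of the
   two threads was active for a quarter of the reference cycles
   (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK = 0.25),
   then:

     CORE_CLKS = ( 1000000 / 2 ) * ( 1 + 0.25 ) = 625000
     SLOTS_SMT = 4 * CORE_CLKS               = 2500000

   i.e. the 4 issue slots per cycle are scaled by shared core clocks, so
   work from both hardware threads is not double counted.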

@ -1,376 +1,394 @@
[
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Frontend_Bound",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Frontend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Bad_Speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Bad_Speculation_SMT",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Backend_Bound_SMT",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
"MetricName": "Retiring_SMT",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
"BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
"BriefDescription": "Instruction per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
"MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
"MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
"MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
"MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts",
"MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
"MetricGroup": "Branch_Mispredicts_SMT",
"MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
"MetricGroup": "Branch_Mispredicts",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Access_BW"
},
{
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricGroup": "Cache_Misses;",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
"MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
"MetricGroup": "",
"MetricName": "L2_Evictions_Silent_PKI"
},
{
"BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
"MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
"MetricGroup": "",
"MetricName": "L2_Evictions_NonSilent_PKI"
},
{
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
"BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x35\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1\\\\\\,config\\=0x40433@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
"MetricExpr": "cha_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cha_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
"MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
"MetricGroup": "",
"MetricName": "IpFarBranch"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
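
 The metrics above are all selectable by name or by MetricGroup from 'perf stat -M',
 so a quick way to exercise the update is (a sketch; event availability depends on
 the CPU and kernel):

   # perf stat -M IPC,CPI -a -- sleep 1
   # perf stat -M TopDownL1 -a -- sleep 1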

@@ -772,6 +772,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
char *line, *p;
int line_num;
char *tblname;
int ret = 0;
pr_info("%s: Processing mapfile %s\n", prog, fpath);
@@ -783,6 +784,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
if (!mapfp) {
pr_info("%s: Error %s opening %s\n", prog, strerror(errno),
fpath);
free(line);
return -1;
}
@@ -809,7 +811,8 @@ static int process_mapfile(FILE *outfp, char *fpath)
/* TODO Deal with lines longer than 16K */
pr_info("%s: Mapfile %s: line %d too long, aborting\n",
prog, fpath, line_num);
-return -1;
+ret = -1;
+goto out;
}
line[strlen(line)-1] = '\0';
@@ -839,7 +842,9 @@ static int process_mapfile(FILE *outfp, char *fpath)
out:
print_mapping_table_suffix(outfp);
-return 0;
+fclose(mapfp);
+free(line);
+return ret;
}
/*
@@ -1136,6 +1141,7 @@ int main(int argc, char *argv[])
goto empty_map;
} else if (rc < 0) {
/* Make build fail */
fclose(eventsfp);
free_arch_std_events();
return 1;
} else if (rc) {
@@ -1148,6 +1154,7 @@ int main(int argc, char *argv[])
goto empty_map;
} else if (rc < 0) {
/* Make build fail */
fclose(eventsfp);
free_arch_std_events();
return 1;
} else if (rc) {
@@ -1165,6 +1172,8 @@ int main(int argc, char *argv[])
if (process_mapfile(eventsfp, mapfile)) {
pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
/* Make build fail */
fclose(eventsfp);
free_arch_std_events();
return 1;
}

@@ -148,6 +148,15 @@ int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __m
goto out_delete_evlist;
}
evlist__close(evlist);
err = evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
err = do_test(evlist, 1, &sample_count, &comm_count);
if (err != TEST_OK)
goto out_delete_evlist;

@@ -295,7 +295,7 @@ bool test__bp_signal_is_supported(void)
* breakpointed instruction.
*
* Since arm64 has the same issue with arm for the single-step
-* handling, this case also gets suck on the breakpointed
+* handling, this case also gets stuck on the breakpointed
* instruction.
*
* Just disable the test for these architectures until these

@@ -18,17 +18,16 @@ static int check_maps(struct map_def *merged, unsigned int size, struct map_grou
struct map *map;
unsigned int i = 0;
-map = map_groups__first(mg);
-while (map) {
+map_groups__for_each_entry(mg, map) {
+if (i > 0)
+TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
TEST_ASSERT_VAL("wrong map start", map->start == merged[i].start);
TEST_ASSERT_VAL("wrong map end", map->end == merged[i].end);
TEST_ASSERT_VAL("wrong map name", !strcmp(map->dso->name, merged[i].name));
TEST_ASSERT_VAL("wrong map refcnt", refcount_read(&map->refcnt) == 2);
i++;
-map = map_groups__next(map);
-TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
}
return TEST_OK;

@@ -182,7 +182,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
header_printed = false;
-for (map = maps__first(maps); map; map = map__next(map)) {
+maps__for_each_entry(maps, map) {
struct map *
/*
* If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
@@ -207,7 +207,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
header_printed = false;
-for (map = maps__first(maps); map; map = map__next(map)) {
+maps__for_each_entry(maps, map) {
struct map *pair;
mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
@@ -237,7 +237,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
maps = machine__kernel_maps(&kallsyms);
-for (map = maps__first(maps); map; map = map__next(map)) {
+maps__for_each_entry(maps, map) {
if (!map->priv) {
if (!header_printed) {
pr_info("WARN: Maps only in kallsyms:\n");

@@ -26,6 +26,7 @@
#include "../../util/sort.h"
#include "../../util/top.h"
#include "../../util/thread.h"
#include "../../util/block-info.h"
#include "../../arch/common.h"
#include "../../perf.h"
@@ -1783,7 +1784,11 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
continue;
}
-percent = hist_entry__get_percent_limit(h);
+if (symbol_conf.report_individual_block)
+percent = block_info__total_cycles_percent(h);
+else
+percent = hist_entry__get_percent_limit(h);
if (percent < hb->min_pcnt)
continue;

@@ -5,6 +5,7 @@
#include "ui/browser.h"
struct annotation_options;
struct evsel;
struct hist_browser {
struct ui_browser b;
@@ -15,6 +16,7 @@ struct hist_browser {
struct pstack *pstack;
struct perf_env *env;
struct annotation_options *annotation_opts;
struct evsel *block_evsel;
int print_seq;
bool show_dso;
bool show_headers;

@@ -15,6 +15,7 @@
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/block-info.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
@@ -558,6 +559,25 @@ static int hist_entry__block_fprintf(struct hist_entry *he,
return ret;
}
static int hist_entry__individual_block_fprintf(struct hist_entry *he,
char *bf, size_t size,
FILE *fp)
{
int ret = 0;
struct perf_hpp hpp = {
.buf = bf,
.size = size,
.skip = false,
};
hist_entry__snprintf(he, &hpp);
if (!hpp.skip)
ret += fprintf(fp, "%s\n", bf);
return ret;
}
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
char *bf, size_t bfsz, FILE *fp,
bool ignore_callchains)
@@ -580,6 +600,9 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
if (symbol_conf.report_block)
return hist_entry__block_fprintf(he, bf, size, fp);
if (symbol_conf.report_individual_block)
return hist_entry__individual_block_fprintf(he, bf, size, fp);
hist_entry__snprintf(he, &hpp);
ret = fprintf(fp, "%s\n", bf);
@@ -834,7 +857,11 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
if (h->filtered)
continue;
-percent = hist_entry__get_percent_limit(h);
+if (symbol_conf.report_individual_block)
+percent = block_info__total_cycles_percent(h);
+else
+percent = hist_entry__get_percent_limit(h);
if (percent < min_pcnt)
continue;

@@ -1,4 +1,5 @@
perf-y += annotate.o
perf-y += block-info.o
perf-y += block-range.o
perf-y += build-id.o
perf-y += cacheline.o

@@ -1892,7 +1892,7 @@ static char *expand_tabs(char *line, char **storage, size_t *storage_len)
}
/* Expand the last region. */
-len = line_len + 1 - src;
+len = line_len - src;
memcpy(&new_line[dst], &line[src], len);
dst += len;
new_line[dst] = '\0';

@@ -1457,6 +1457,34 @@ int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
return 0;
}
static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
u32 key)
{
struct auxtrace_cache_entry *entry;
struct hlist_head *hlist;
struct hlist_node *n;
if (!c)
return NULL;
hlist = &c->hashtable[hash_32(key, c->bits)];
hlist_for_each_entry_safe(entry, n, hlist, hash) {
if (entry->key == key) {
hlist_del(&entry->hash);
return entry;
}
}
return NULL;
}
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
auxtrace_cache__free_entry(c, entry);
}
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
struct auxtrace_cache_entry *entry;

@@ -489,6 +489,7 @@ void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,

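For reference, auxtrace_cache__remove() completes the existing alloc/add/lookup
lifecycle. A minimal sketch of a user, with a hypothetical entry type (real users
such as the Intel PT decoder embed struct auxtrace_cache_entry first, since the
cache links entries through entry.hash):

struct my_entry {
	struct auxtrace_cache_entry entry;	/* must be first */
	u64 insn;
};

static int cache_insn(struct auxtrace_cache *c, u32 offset, u64 insn)
{
	struct my_entry *e = auxtrace_cache__alloc_entry(c);

	if (!e)
		return -ENOMEM;
	e->insn = insn;
	return auxtrace_cache__add(c, offset, &e->entry);
}

static void uncache_insn(struct auxtrace_cache *c, u32 offset)
{
	/* unlinks the entry and frees it via auxtrace_cache__free_entry() */
	auxtrace_cache__remove(c, offset);
}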
@@ -0,0 +1,538 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
#include "block-info.h"
#include "sort.h"
#include "annotate.h"
#include "symbol.h"
#include "dso.h"
#include "map.h"
#include "srcline.h"
#include "evlist.h"
#include "ui/browsers/hists.h"
static struct block_header_column {
const char *name;
int width;
} block_columns[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT] = {
.name = "Sampled Cycles%",
.width = 15,
},
[PERF_HPP_REPORT__BLOCK_LBR_CYCLES] = {
.name = "Sampled Cycles",
.width = 14,
},
[PERF_HPP_REPORT__BLOCK_CYCLES_PCT] = {
.name = "Avg Cycles%",
.width = 11,
},
[PERF_HPP_REPORT__BLOCK_AVG_CYCLES] = {
.name = "Avg Cycles",
.width = 10,
},
[PERF_HPP_REPORT__BLOCK_RANGE] = {
.name = "[Program Block Range]",
.width = 70,
},
[PERF_HPP_REPORT__BLOCK_DSO] = {
.name = "Shared Object",
.width = 20,
}
};
struct block_info *block_info__get(struct block_info *bi)
{
if (bi)
refcount_inc(&bi->refcnt);
return bi;
}
void block_info__put(struct block_info *bi)
{
if (bi && refcount_dec_and_test(&bi->refcnt))
free(bi);
}
struct block_info *block_info__new(void)
{
struct block_info *bi = zalloc(sizeof(*bi));
if (bi)
refcount_set(&bi->refcnt, 1);
return bi;
}
int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct block_info *bi_l = left->block_info;
struct block_info *bi_r = right->block_info;
int cmp;
if (!bi_l->sym || !bi_r->sym) {
if (!bi_l->sym && !bi_r->sym)
return 0;
else if (!bi_l->sym)
return -1;
else
return 1;
}
if (bi_l->sym == bi_r->sym) {
if (bi_l->start == bi_r->start) {
if (bi_l->end == bi_r->end)
return 0;
else
return (int64_t)(bi_r->end - bi_l->end);
} else
return (int64_t)(bi_r->start - bi_l->start);
} else {
cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
return cmp;
}
if (bi_l->sym->start != bi_r->sym->start)
return (int64_t)(bi_r->sym->start - bi_l->sym->start);
return (int64_t)(bi_r->sym->end - bi_l->sym->end);
}
static void init_block_info(struct block_info *bi, struct symbol *sym,
struct cyc_hist *ch, int offset,
u64 total_cycles)
{
bi->sym = sym;
bi->start = ch->start;
bi->end = offset;
bi->cycles = ch->cycles;
bi->cycles_aggr = ch->cycles_aggr;
bi->num = ch->num;
bi->num_aggr = ch->num_aggr;
bi->total_cycles = total_cycles;
memcpy(bi->cycles_spark, ch->cycles_spark,
NUM_SPARKS * sizeof(u64));
}
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
u64 *block_cycles_aggr, u64 total_cycles)
{
struct annotation *notes;
struct cyc_hist *ch;
static struct addr_location al;
u64 cycles = 0;
if (!he->ms.map || !he->ms.sym)
return 0;
memset(&al, 0, sizeof(al));
al.map = he->ms.map;
al.sym = he->ms.sym;
notes = symbol__annotation(he->ms.sym);
if (!notes || !notes->src || !notes->src->cycles_hist)
return 0;
ch = notes->src->cycles_hist;
for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
if (ch[i].num_aggr) {
struct block_info *bi;
struct hist_entry *he_block;
bi = block_info__new();
if (!bi)
return -1;
init_block_info(bi, he->ms.sym, &ch[i], i,
total_cycles);
cycles += bi->cycles_aggr / bi->num_aggr;
he_block = hists__add_entry_block(&bh->block_hists,
&al, bi);
if (!he_block) {
block_info__put(bi);
return -1;
}
}
}
if (block_cycles_aggr)
*block_cycles_aggr += cycles;
return 0;
}
static int block_column_header(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hists *hists __maybe_unused,
int line __maybe_unused,
int *span __maybe_unused)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
block_fmt->header);
}
static int block_column_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists __maybe_unused)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
return block_fmt->width;
}
static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
double ratio = 0.0;
char buf[16];
if (block_fmt->total_cycles)
ratio = (double)bi->cycles / (double)block_fmt->total_cycles;
sprintf(buf, "%.2f%%", 100.0 * ratio);
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}
static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
struct hist_entry *left,
struct hist_entry *right)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi_l = left->block_info;
struct block_info *bi_r = right->block_info;
double l, r;
if (block_fmt->total_cycles) {
l = ((double)bi_l->cycles /
(double)block_fmt->total_cycles) * 100000.0;
r = ((double)bi_r->cycles /
(double)block_fmt->total_cycles) * 100000.0;
return (int64_t)l - (int64_t)r;
}
return 0;
}
static void cycles_string(u64 cycles, char *buf, int size)
{
if (cycles >= 1000000)
scnprintf(buf, size, "%.1fM", (double)cycles / 1000000.0);
else if (cycles >= 1000)
scnprintf(buf, size, "%.1fK", (double)cycles / 1000.0);
else
scnprintf(buf, size, "%1d", cycles);
}
static int block_cycles_lbr_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
char cycles_buf[16];
cycles_string(bi->cycles_aggr, cycles_buf, sizeof(cycles_buf));
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
cycles_buf);
}
static int block_cycles_pct_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp, struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
double ratio = 0.0;
u64 avg;
char buf[16];
if (block_fmt->block_cycles && bi->num_aggr) {
avg = bi->cycles_aggr / bi->num_aggr;
ratio = (double)avg / (double)block_fmt->block_cycles;
}
sprintf(buf, "%.2f%%", 100.0 * ratio);
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}
static int block_avg_cycles_entry(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
char cycles_buf[16];
cycles_string(bi->cycles_aggr / bi->num_aggr, cycles_buf,
sizeof(cycles_buf));
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
cycles_buf);
}
static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct block_info *bi = he->block_info;
char buf[128];
char *start_line, *end_line;
symbol_conf.disable_add2line_warn = true;
start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
he->ms.sym);
end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
he->ms.sym);
if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
scnprintf(buf, sizeof(buf), "[%s -> %s]",
start_line, end_line);
} else {
scnprintf(buf, sizeof(buf), "[%7lx -> %7lx]",
bi->start, bi->end);
}
free_srcline(start_line);
free_srcline(end_line);
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}
static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
struct map *map = he->ms.map;
if (map && map->dso) {
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
map->dso->short_name);
}
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
"[unknown]");
}
static void init_block_header(struct block_fmt *block_fmt)
{
struct perf_hpp_fmt *fmt = &block_fmt->fmt;
BUG_ON(block_fmt->idx >= PERF_HPP_REPORT__BLOCK_MAX_INDEX);
block_fmt->header = block_columns[block_fmt->idx].name;
block_fmt->width = block_columns[block_fmt->idx].width;
fmt->header = block_column_header;
fmt->width = block_column_width;
}
static void hpp_register(struct block_fmt *block_fmt, int idx,
struct perf_hpp_list *hpp_list)
{
struct perf_hpp_fmt *fmt = &block_fmt->fmt;
block_fmt->idx = idx;
INIT_LIST_HEAD(&fmt->list);
INIT_LIST_HEAD(&fmt->sort_list);
switch (idx) {
case PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT:
fmt->entry = block_total_cycles_pct_entry;
fmt->cmp = block_info__cmp;
fmt->sort = block_total_cycles_pct_sort;
break;
case PERF_HPP_REPORT__BLOCK_LBR_CYCLES:
fmt->entry = block_cycles_lbr_entry;
break;
case PERF_HPP_REPORT__BLOCK_CYCLES_PCT:
fmt->entry = block_cycles_pct_entry;
break;
case PERF_HPP_REPORT__BLOCK_AVG_CYCLES:
fmt->entry = block_avg_cycles_entry;
break;
case PERF_HPP_REPORT__BLOCK_RANGE:
fmt->entry = block_range_entry;
break;
case PERF_HPP_REPORT__BLOCK_DSO:
fmt->entry = block_dso_entry;
break;
default:
return;
}
init_block_header(block_fmt);
perf_hpp_list__column_register(hpp_list, fmt);
}
static void register_block_columns(struct perf_hpp_list *hpp_list,
struct block_fmt *block_fmts)
{
for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++)
hpp_register(&block_fmts[i], i, hpp_list);
}
static void init_block_hist(struct block_hist *bh, struct block_fmt *block_fmts)
{
__hists__init(&bh->block_hists, &bh->block_list);
perf_hpp_list__init(&bh->block_list);
bh->block_list.nr_header_lines = 1;
register_block_columns(&bh->block_list, block_fmts);
perf_hpp_list__register_sort_field(&bh->block_list,
&block_fmts[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT].fmt);
}
static void process_block_report(struct hists *hists,
struct block_report *block_report,
u64 total_cycles)
{
struct rb_node *next = rb_first_cached(&hists->entries);
struct block_hist *bh = &block_report->hist;
struct hist_entry *he;
init_block_hist(bh, block_report->fmts);
while (next) {
he = rb_entry(next, struct hist_entry, rb_node);
block_info__process_sym(he, bh, &block_report->cycles,
total_cycles);
next = rb_next(&he->rb_node);
}
for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++) {
block_report->fmts[i].total_cycles = total_cycles;
block_report->fmts[i].block_cycles = block_report->cycles;
}
hists__output_resort(&bh->block_hists, NULL);
}
struct block_report *block_info__create_report(struct evlist *evlist,
u64 total_cycles)
{
struct block_report *block_reports;
int nr_hists = evlist->core.nr_entries, i = 0;
struct evsel *pos;
block_reports = calloc(nr_hists, sizeof(struct block_report));
if (!block_reports)
return NULL;
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
process_block_report(hists, &block_reports[i], total_cycles);
i++;
}
return block_reports;
}
#ifdef HAVE_SLANG_SUPPORT
static int block_hists_browser__title(struct hist_browser *browser, char *bf,
size_t size)
{
struct hists *hists = evsel__hists(browser->block_evsel);
const char *evname = perf_evsel__name(browser->block_evsel);
unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
int ret;
ret = scnprintf(bf, size, "# Samples: %lu", nr_samples);
if (evname)
scnprintf(bf + ret, size - ret, " of event '%s'", evname);
return 0;
}
static int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
float min_percent)
{
struct hists *hists = &bh->block_hists;
struct hist_browser *browser;
int key = -1;
static const char help[] =
" q Quit \n";
browser = hist_browser__new(hists);
if (!browser)
return -1;
browser->block_evsel = evsel;
browser->title = block_hists_browser__title;
browser->min_pcnt = min_percent;
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
while (1) {
key = hist_browser__run(browser, "? - help", true);
switch (key) {
case 'q':
goto out;
case '?':
ui_browser__help_window(&browser->b, help);
break;
default:
break;
}
}
out:
hist_browser__delete(browser);
return 0;
}
#else
static int block_hists_tui_browse(struct block_hist *bh __maybe_unused,
struct evsel *evsel __maybe_unused,
float min_percent __maybe_unused)
{
return 0;
}
#endif
int report__browse_block_hists(struct block_hist *bh, float min_percent,
struct evsel *evsel)
{
int ret;
switch (use_browser) {
case 0:
symbol_conf.report_individual_block = true;
hists__fprintf(&bh->block_hists, true, 0, 0, min_percent,
stdout, true);
hists__delete_entries(&bh->block_hists);
return 0;
case 1:
symbol_conf.report_individual_block = true;
ret = block_hists_tui_browse(bh, evsel, min_percent);
hists__delete_entries(&bh->block_hists);
return ret;
default:
return -1;
}
return 0;
}
float block_info__total_cycles_percent(struct hist_entry *he)
{
struct block_info *bi = he->block_info;
if (bi->total_cycles)
return bi->cycles * 100.0 / bi->total_cycles;
return 0.0;
}

@@ -0,0 +1,78 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_BLOCK_H
#define __PERF_BLOCK_H
#include <linux/types.h>
#include <linux/refcount.h>
#include "hist.h"
#include "symbol.h"
#include "sort.h"
#include "ui/ui.h"
struct block_info {
struct symbol *sym;
u64 start;
u64 end;
u64 cycles;
u64 cycles_aggr;
s64 cycles_spark[NUM_SPARKS];
u64 total_cycles;
int num;
int num_aggr;
refcount_t refcnt;
};
struct block_fmt {
struct perf_hpp_fmt fmt;
int idx;
int width;
const char *header;
u64 total_cycles;
u64 block_cycles;
};
enum {
PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
PERF_HPP_REPORT__BLOCK_RANGE,
PERF_HPP_REPORT__BLOCK_DSO,
PERF_HPP_REPORT__BLOCK_MAX_INDEX
};
struct block_report {
struct block_hist hist;
u64 cycles;
struct block_fmt fmts[PERF_HPP_REPORT__BLOCK_MAX_INDEX];
};
struct block_hist;
struct block_info *block_info__new(void);
struct block_info *block_info__get(struct block_info *bi);
void block_info__put(struct block_info *bi);
static inline void __block_info__zput(struct block_info **bi)
{
block_info__put(*bi);
*bi = NULL;
}
#define block_info__zput(bi) __block_info__zput(&bi)
int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right);
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
u64 *block_cycles_aggr, u64 total_cycles);
struct block_report *block_info__create_report(struct evlist *evlist,
u64 total_cycles);
int report__browse_block_hists(struct block_hist *bh, float min_percent,
struct evsel *evsel);
float block_info__total_cycles_percent(struct hist_entry *he);
#endif /* __PERF_BLOCK_H */
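
Roughly how builtin-report is expected to drive this API for --total-cycles
(a sketch based only on the declarations above; 'session', 'total_cycles' and
'min_percent' stand in for the report state, error handling is elided):

static int report__total_cycles(struct perf_session *session,
				u64 total_cycles, float min_percent)
{
	struct block_report *reps;
	struct evsel *pos;
	int i = 0;

	reps = block_info__create_report(session->evlist, total_cycles);
	if (!reps)
		return -1;

	/* one block histogram per event, in evlist order */
	evlist__for_each_entry(session->evlist, pos)
		report__browse_block_hists(&reps[i++].hist, min_percent, pos);

	free(reps);
	return 0;
}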

@@ -206,6 +206,11 @@ int cpu_map__get_core_id(int cpu)
return ret ?: value;
}
int cpu_map__get_node_id(int cpu)
{
return cpu__get_node(cpu);
}
int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
{
int cpu, s_die;
@@ -235,6 +240,14 @@ int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
return (s_die << 16) | (cpu & 0xffff);
}
int cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
{
if (idx < 0 || idx >= map->nr)
return -1;
return cpu_map__get_node_id(map->map[idx]);
}
int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp)
{
return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
@@ -250,6 +263,11 @@ int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **cor
return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}
int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct perf_cpu_map **numap)
{
return cpu_map__build_map(cpus, numap, cpu_map__get_node, NULL);
}
/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{

@@ -20,9 +20,12 @@ int cpu_map__get_die_id(int cpu);
int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data);
int cpu_map__get_core_id(int cpu);
int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data);
int cpu_map__get_node_id(int cpu);
int cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data);
int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp);
int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep);
int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep);
int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct perf_cpu_map **nodep);
const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */
static inline int cpu_map__socket(struct perf_cpu_map *sock, int s)

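These helpers are what back 'perf stat --per-node': a CPU index is folded to its
NUMA node id the same way the socket/die/core aggregation ids are built. A sketch
('cpus' and 'idx' come from the surrounding stat code):

static int aggr_id_for_node(struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu_map *nodes = NULL;

	/* built once in real code; one entry per NUMA node present in 'cpus' */
	if (cpu_map__build_node_map(cpus, &nodes) < 0)
		return -1;

	/* the aggregation id for cpus->map[idx] is its NUMA node */
	return cpu_map__get_node(cpus, idx, NULL);
}
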
@@ -110,7 +110,7 @@ static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
* encode the etm queue number as the upper 16 bit and the channel as
* the lower 16 bit.
*/
-#define TO_CS_QUEUE_NR(queue_nr, trace_id_chan) \
+#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
(queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
@@ -819,7 +819,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* Note that packets decoded above are still in the traceID's packet
* queue and will be processed in cs_etm__process_queues().
*/
-cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_id_chan);
+cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
out:
return ret;

@@ -76,6 +76,13 @@ int perf_data__open_dir(struct perf_data *data)
DIR *dir;
int nr = 0;
/*
* Directory containing a single regular perf data file which is already
* open, means there is nothing more to do here.
*/
if (perf_data__is_single_file(data))
return 0;
if (WARN_ON(!data->is_dir))
return -EINVAL;
@@ -96,7 +103,7 @@ int perf_data__open_dir(struct perf_data *data)
if (stat(path, &st))
continue;
-if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data", 4))
+if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data.", 5))
continue;
ret = -ENOMEM;
@@ -306,7 +313,7 @@ static int open_dir(struct perf_data *data)
* So far we open only the header, so we can read the data version and
* layout.
*/
-if (asprintf(&data->file.path, "%s/header", data->path) < 0)
+if (asprintf(&data->file.path, "%s/data", data->path) < 0)
return -1;
if (perf_data__is_write(data) &&
@@ -406,7 +413,7 @@ unsigned long perf_data__size(struct perf_data *data)
u64 size = data->file.size;
int i;
-if (!data->is_dir)
+if (perf_data__is_single_file(data))
return size;
for (i = 0; i < data->dir.nr; i++) {
@@ -417,3 +424,36 @@ unsigned long perf_data__size(struct perf_data *data)
return size;
}
int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz)
{
int ret;
if (!data->is_dir)
return -1;
ret = snprintf(buf, buf_sz, "%s/kcore_dir", data->path);
if (ret < 0 || (size_t)ret >= buf_sz)
return -1;
return mkdir(buf, S_IRWXU);
}
char *perf_data__kallsyms_name(struct perf_data *data)
{
char *kallsyms_name;
struct stat st;
if (!data->is_dir)
return NULL;
if (asprintf(&kallsyms_name, "%s/kcore_dir/kallsyms", data->path) < 0)
return NULL;
if (stat(kallsyms_name, &st)) {
free(kallsyms_name);
return NULL;
}
return kallsyms_name;
}

@@ -9,6 +9,11 @@ enum perf_data_mode {
PERF_DATA_MODE_READ,
};
enum perf_dir_version {
PERF_DIR_SINGLE_FILE = 0,
PERF_DIR_VERSION = 1,
};
struct perf_data_file {
char *path;
int fd;
@@ -50,6 +55,11 @@ static inline bool perf_data__is_dir(struct perf_data *data)
return data->is_dir;
}
static inline bool perf_data__is_single_file(struct perf_data *data)
{
return data->dir.version == PERF_DIR_SINGLE_FILE;
}
static inline int perf_data__fd(struct perf_data *data)
{
return data->file.fd;
@@ -77,4 +87,6 @@ int perf_data__open_dir(struct perf_data *data);
void perf_data__close_dir(struct perf_data *data);
int perf_data__update_dir(struct perf_data *data);
unsigned long perf_data__size(struct perf_data *data);
int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz);
char *perf_data__kallsyms_name(struct perf_data *data);
#endif /* __PERF_DATA_H */
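
In outline, record and report use these like so (a sketch; the actual kcore
copying is done by perf record's --kcore code and is elided, 'data' is the
tool's struct perf_data and copy_kcore() is a hypothetical helper):

	char kcore_dir[PATH_MAX];

	/* record --kcore: perf.data is a directory; add kcore_dir/ inside it */
	if (perf_data__make_kcore_dir(&data, kcore_dir, sizeof(kcore_dir)) == 0)
		copy_kcore(kcore_dir);		/* hypothetical helper */

	/* report: prefer the kallsyms copy saved next to the data */
	char *kallsyms = perf_data__kallsyms_name(&data);

	if (kallsyms)
		symbol_conf.kallsyms_name = kallsyms;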

@@ -768,7 +768,7 @@ dso_cache__free(struct dso *dso)
pthread_mutex_unlock(&dso->lock);
}
-static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
+static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
const struct rb_root *root = &dso->data.cache;
struct rb_node * const *p = &root->rb_node;
@@ -827,14 +827,16 @@ dso_cache__insert(struct dso *dso, struct dso_cache *new)
return cache;
}
-static ssize_t
-dso_cache__memcpy(struct dso_cache *cache, u64 offset,
-u8 *data, u64 size)
+static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
+u64 size, bool out)
{
u64 cache_offset = offset - cache->offset;
u64 cache_size = min(cache->size - cache_offset, size);
-memcpy(data, cache->data + cache_offset, cache_size);
+if (out)
+memcpy(data, cache->data + cache_offset, cache_size);
+else
+memcpy(cache->data + cache_offset, data, cache_size);
return cache_size;
}
@@ -863,63 +865,73 @@ static ssize_t file_read(struct dso *dso, struct machine *machine,
return ret;
}
-static ssize_t
-dso_cache__read(struct dso *dso, struct machine *machine,
-u64 offset, u8 *data, ssize_t size)
+static struct dso_cache *dso_cache__populate(struct dso *dso,
+struct machine *machine,
+u64 offset, ssize_t *ret)
{
u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
struct dso_cache *cache;
struct dso_cache *old;
-ssize_t ret;
cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
-if (!cache)
-return -ENOMEM;
-if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
-ret = bpf_read(dso, cache_offset, cache->data);
-else
-ret = file_read(dso, machine, cache_offset, cache->data);
-if (ret > 0) {
-cache->offset = cache_offset;
-cache->size = ret;
-old = dso_cache__insert(dso, cache);
-if (old) {
-/* we lose the race */
-free(cache);
-cache = old;
-}
-ret = dso_cache__memcpy(cache, offset, data, size);
+if (!cache) {
+*ret = -ENOMEM;
+return NULL;
+}
-if (ret <= 0)
-free(cache);
+if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+*ret = bpf_read(dso, cache_offset, cache->data);
+else
+*ret = file_read(dso, machine, cache_offset, cache->data);
-return ret;
+if (*ret <= 0) {
+free(cache);
+return NULL;
+}
+cache->offset = cache_offset;
+cache->size = *ret;
+old = dso_cache__insert(dso, cache);
+if (old) {
+/* we lose the race */
+free(cache);
+cache = old;
+}
+return cache;
}
-static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
-u64 offset, u8 *data, ssize_t size)
+static struct dso_cache *dso_cache__find(struct dso *dso,
+struct machine *machine,
+u64 offset,
+ssize_t *ret)
{
+struct dso_cache *cache = __dso_cache__find(dso, offset);
+return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
+}
+static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
+u64 offset, u8 *data, ssize_t size, bool out)
+{
struct dso_cache *cache;
+ssize_t ret = 0;
-cache = dso_cache__find(dso, offset);
-if (cache)
-return dso_cache__memcpy(cache, offset, data, size);
-else
-return dso_cache__read(dso, machine, offset, data, size);
+cache = dso_cache__find(dso, machine, offset, &ret);
+if (!cache)
+return ret;
+return dso_cache__memcpy(cache, offset, data, size, out);
}
/*
* Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
* in the rb_tree. Any read to already cached data is served
-* by cached data.
+* by cached data. Writes update the cache only, not the backing file.
*/
-static ssize_t cached_read(struct dso *dso, struct machine *machine,
-u64 offset, u8 *data, ssize_t size)
+static ssize_t cached_io(struct dso *dso, struct machine *machine,
+u64 offset, u8 *data, ssize_t size, bool out)
{
ssize_t r = 0;
u8 *p = data;
@@ -927,7 +939,7 @@ static ssize_t cached_read(struct dso *dso, struct machine *machine,
do {
ssize_t ret;
-ret = dso_cache_read(dso, machine, offset, p, size);
+ret = dso_cache_io(dso, machine, offset, p, size, out);
if (ret < 0)
return ret;
@@ -1011,8 +1023,9 @@ off_t dso__data_size(struct dso *dso, struct machine *machine)
return dso->data.file_size;
}
-static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
-u64 offset, u8 *data, ssize_t size)
+static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
+u64 offset, u8 *data, ssize_t size,
+bool out)
{
if (dso__data_file_size(dso, machine))
return -1;
@@ -1024,7 +1037,7 @@ static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
if (offset + size < offset)
return -1;
-return cached_read(dso, machine, offset, data, size);
+return cached_io(dso, machine, offset, data, size, out);
}
/**
@@ -1044,7 +1057,7 @@ ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
-return data_read_offset(dso, machine, offset, data, size);
+return data_read_write_offset(dso, machine, offset, data, size, true);
}
/**
@@ -1065,6 +1078,46 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
return dso__data_read_offset(dso, machine, offset, data, size);
}
/**
* dso__data_write_cache_offs - Write data to dso data cache at file offset
* @dso: dso object
* @machine: machine object
* @offset: file offset
* @data: buffer to write
* @size: size of the @data buffer
*
* Write into the dso file data cache, but do not change the file itself.
*/
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
u64 offset, const u8 *data_in, ssize_t size)
{
u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
return data_read_write_offset(dso, machine, offset, data, size, false);
}
/**
* dso__data_write_cache_addr - Write data to dso data cache at dso address
* @dso: dso object
* @machine: machine object
* @add: virtual memory address
* @data: buffer to write
* @size: size of the @data buffer
*
* External interface to write into the dso file data cache, but do not change
* the file itself.
*/
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
const u8 *data, ssize_t size)
{
u64 offset = map->map_ip(map, addr);
return dso__data_write_cache_offs(dso, machine, offset, data, size);
}
struct map *dso__new_map(const char *name)
{
struct map *map = NULL;

@@ -285,6 +285,8 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m,
* dso__data_size
* dso__data_read_offset
* dso__data_read_addr
* dso__data_write_cache_offs
* dso__data_write_cache_addr
*
* Please refer to the dso.c object code for each function and
* arguments documentation. Following text tries to explain the
@@ -332,6 +334,11 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
u8 *data, ssize_t size);
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by);
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
u64 offset, const u8 *data, ssize_t size);
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
const u8 *data, ssize_t size);
struct map *dso__new_map(const char *name);
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
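
The write side mirrors the read side but only touches the in-memory cache, which
lets a trace decoder patch its view of text that was modified at runtime without
changing the file. A sketch ('buf' and 'len' are illustrative):

	/* refresh the cached bytes backing 'addr'; the file itself is unchanged */
	if (dso__data_write_cache_addr(dso, map, machine, addr, buf, len) < len)
		pr_debug("dso data cache write failed\n");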

@@ -307,21 +307,51 @@ bool die_is_func_def(Dwarf_Die *dw_die)
dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
}
/**
* die_entrypc - Returns entry PC (the lowest address) of a DIE
* @dw_die: a DIE
* @addr: where to store entry PC
*
* Since dwarf_entrypc() does not return entry PC if the DIE has only address
* range, we have to use this to retrieve the lowest address from the address
* range attribute.
*/
int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
{
Dwarf_Addr base, end;
if (!addr)
return -EINVAL;
if (dwarf_entrypc(dw_die, addr) == 0)
return 0;
return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
}
/**
* die_is_func_instance - Ensure that this DIE is an instance of a subprogram
* @dw_die: a DIE
*
* Ensure that this DIE is an instance (which has an entry address).
-* This returns true if @dw_die is a function instance. If not, you need to
-* call die_walk_instances() to find actual instances.
+* This returns true if @dw_die is a function instance. If not, the @dw_die
+* must be a prototype. You can use die_walk_instances() to find actual
+* instances.
**/
bool die_is_func_instance(Dwarf_Die *dw_die)
{
Dwarf_Addr tmp;
+Dwarf_Attribute attr_mem;
+int tag = dwarf_tag(dw_die);
-/* Actually gcc optimizes non-inline as like as inlined */
-return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0;
+if (tag != DW_TAG_subprogram &&
+tag != DW_TAG_inlined_subroutine)
+return false;
+return dwarf_entrypc(dw_die, &tmp) == 0 ||
+dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL;
}
/**
* die_get_data_member_location - Get the data-member offset
* @mb_die: a DIE of a member of a data structure
@@ -598,6 +628,9 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
Dwarf_Die *origin;
int tmp;
if (!die_is_func_instance(inst))
return DIE_FIND_CB_CONTINUE;
attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
if (attr == NULL)
return DIE_FIND_CB_CONTINUE;
@@ -669,15 +702,14 @@ static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
fname = die_get_call_file(in_die);
lineno = die_get_call_lineno(in_die);
-if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) {
lw->retval = lw->callback(fname, lineno, addr, lw->data);
if (lw->retval != 0)
return DIE_FIND_CB_END;
}
+if (!lw->recursive)
+return DIE_FIND_CB_SIBLING;
}
-if (!lw->recursive)
-/* Don't need to search recursively */
-return DIE_FIND_CB_SIBLING;
if (addr) {
fname = dwarf_decl_file(in_die);
@@ -710,7 +742,7 @@ static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
/* Handle function declaration line */
fname = dwarf_decl_file(sp_die);
if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
-dwarf_entrypc(sp_die, &addr) == 0) {
+die_entrypc(sp_die, &addr) == 0) {
lw.retval = callback(fname, lineno, addr, data);
if (lw.retval != 0)
goto done;
@@ -724,6 +756,10 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
{
struct __line_walk_param *lw = data;
+/*
+* Since inlined function can include another inlined function in
+* the same file, we need to walk in it recursively.
+*/
lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
if (lw->retval != 0)
return DWARF_CB_ABORT;
@ -748,11 +784,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
Dwarf_Lines *lines;
Dwarf_Line *line;
Dwarf_Addr addr;
const char *fname, *decf = NULL;
const char *fname, *decf = NULL, *inf = NULL;
int lineno, ret = 0;
int decl = 0, inl;
Dwarf_Die die_mem, *cu_die;
size_t nlines, i;
bool flag;
/* Get the CU die */
if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
@ -783,6 +820,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
"Possible error in debuginfo.\n");
continue;
}
/* Skip end-of-sequence */
if (dwarf_lineendsequence(line, &flag) != 0 || flag)
continue;
/* Skip Non statement line-info */
if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
continue;
/* Filter lines based on address */
if (rt_die != cu_die) {
/*
@ -792,13 +835,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (!dwarf_haspc(rt_die, addr))
continue;
if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
/* Call-site check */
inf = die_get_call_file(&die_mem);
if ((inf && !strcmp(inf, decf)) &&
die_get_call_lineno(&die_mem) == lineno)
goto found;
dwarf_decl_line(&die_mem, &inl);
if (inl != decl ||
decf != dwarf_decl_file(&die_mem))
continue;
}
}
found:
/* Get source line */
fname = dwarf_linesrc(line, NULL, NULL);
@ -813,8 +864,9 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (rt_die != cu_die)
/*
* Don't need walk functions recursively, because nested
* inlined functions don't have lines of the specified DIE.
* Don't need walk inlined functions recursively, because
* inner inlined functions don't have the lines of the
* specified function.
*/
ret = __die_walk_funclines(rt_die, false, callback, data);
else {
@ -989,7 +1041,7 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
bool first = true;
const char *name;
ret = dwarf_entrypc(sp_die, &entry);
ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
@ -1052,7 +1104,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
bool first = true;
const char *name;
ret = dwarf_entrypc(sp_die, &entry);
ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;


@ -29,6 +29,9 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
/* Get DW_AT_linkage_name (should be NULL for C binary) */
const char *die_get_linkage_name(Dwarf_Die *dw_die);
/* Get the lowest PC in DIE (including range list) */
int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr);
/* Ensure that this DIE is a subprogram and definition (not declaration) */
bool die_is_func_def(Dwarf_Die *dw_die);

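Background on the die_entrypc() change above: GCC may describe an optimized function with only a DW_AT_ranges attribute and no DW_AT_low_pc, in which case dwarf_entrypc() fails. Below is a minimal standalone sketch of the same fallback against elfutils libdw; the CU-walking driver is hypothetical, not perf code.

/*
 * Hypothetical driver, not perf code: print the entry PC of every
 * function DIE in a debuginfo file, falling back to the first
 * DW_AT_ranges entry the way die_entrypc() does.  Build with -ldw.
 */
#include <elfutils/libdw.h>
#include <dwarf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int entry_pc(Dwarf_Die *die, Dwarf_Addr *addr)
{
	Dwarf_Addr base, end;

	if (dwarf_entrypc(die, addr) == 0)
		return 0;
	/* No DW_AT_low_pc: use the lowest address of the first range */
	return dwarf_ranges(die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
}

int main(int argc, char **argv)
{
	Dwarf_Off off = 0, next;
	size_t hsize;
	Dwarf *dw;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	dw = dwarf_begin(fd, DWARF_C_READ);
	if (!dw)
		return 1;

	while (dwarf_nextcu(dw, off, &next, &hsize, NULL, NULL, NULL) == 0) {
		Dwarf_Die cu, die;

		if (dwarf_offdie(dw, off + hsize, &cu) &&
		    dwarf_child(&cu, &die) == 0) {
			do {
				Dwarf_Addr a;

				if (dwarf_tag(&die) == DW_TAG_subprogram &&
				    entry_pc(&die, &a) == 0)
					printf("%-32s %#llx\n",
					       dwarf_diename(&die) ?: "?",
					       (unsigned long long)a);
			} while (dwarf_siblingof(&die, &die) == 0);
		}
		off = next;
	}
	dwarf_end(dw);
	close(fd);
	return 0;
}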

@ -180,6 +180,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++)
perf_cpu_map__put(env->numa_nodes[i].map);
@ -354,3 +355,42 @@ const char *perf_env__arch(struct perf_env *env)
return normalize_arch(arch_name);
}
int perf_env__numa_node(struct perf_env *env, int cpu)
{
if (!env->nr_numa_map) {
struct numa_node *nn;
int i, nr = 0;
for (i = 0; i < env->nr_numa_nodes; i++) {
nn = &env->numa_nodes[i];
nr = max(nr, perf_cpu_map__max(nn->map));
}
nr++;
/*
* We initialize the numa_map array to prepare
* it for missing cpus, which return node -1
*/
env->numa_map = malloc(nr * sizeof(int));
if (!env->numa_map)
return -1;
for (i = 0; i < nr; i++)
env->numa_map[i] = -1;
env->nr_numa_map = nr;
for (i = 0; i < env->nr_numa_nodes; i++) {
int tmp, j;
nn = &env->numa_nodes[i];
perf_cpu_map__for_each_cpu(j, tmp, nn->map)
env->numa_map[j] = i;
}
}
return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}


@ -87,6 +87,10 @@ struct perf_env {
struct rb_root btfs;
u32 btfs_cnt;
} bpf_progs;
/* For fast cpu to numa node lookup via perf_env__numa_node */
int *numa_map;
int nr_numa_map;
};
enum perf_compress_type {
@ -120,4 +124,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
int perf_env__numa_node(struct perf_env *env, int cpu);
#endif /* __PERF_ENV_H */

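The new perf_env__numa_node() builds its cpu to node table lazily on first use: it sizes the table by the highest cpu id found in any node's cpu map, defaults every slot to -1 so that missing cpus resolve to node -1, and then fills the slots from the node list. A standalone sketch of the same pattern with plain arrays (hypothetical code, not the perf implementation):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; int nr_cpus; int cpus[4]; };

static int *numa_map;
static int nr_numa_map;

static int numa_node(const struct node *nodes, int nr_nodes, int cpu)
{
	if (!nr_numa_map) {
		int i, j, nr = 0;

		/* Size the map by the highest cpu id in any node */
		for (i = 0; i < nr_nodes; i++)
			for (j = 0; j < nodes[i].nr_cpus; j++)
				if (nodes[i].cpus[j] > nr)
					nr = nodes[i].cpus[j];
		nr++;

		numa_map = malloc(nr * sizeof(int));
		if (!numa_map)
			return -1;
		/* Missing cpus resolve to node -1 */
		for (i = 0; i < nr; i++)
			numa_map[i] = -1;
		nr_numa_map = nr;

		for (i = 0; i < nr_nodes; i++)
			for (j = 0; j < nodes[i].nr_cpus; j++)
				numa_map[nodes[i].cpus[j]] = nodes[i].id;
	}
	return cpu >= 0 && cpu < nr_numa_map ? numa_map[cpu] : -1;
}

int main(void)
{
	struct node nodes[] = {
		{ .id = 0, .nr_cpus = 2, .cpus = { 0, 1 } },
		{ .id = 1, .nr_cpus = 2, .cpus = { 2, 3 } },
	};

	printf("cpu 2 -> node %d\n", numa_node(nodes, 2, 2)); /* 1 */
	printf("cpu 9 -> node %d\n", numa_node(nodes, 2, 9)); /* -1 */
	free(numa_map);
	return 0;
}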

@ -1574,7 +1574,7 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
{
int cpu, thread, nthreads;
unsigned long flags = PERF_FLAG_FD_CLOEXEC;
int pid = -1, err;
int pid = -1, err, old_errno;
enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
@ -1727,8 +1727,8 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
*/
if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
struct rlimit l;
int old_errno = errno;
old_errno = errno;
if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
if (set_rlimit == NO_CHANGE)
l.rlim_cur = l.rlim_max;
@ -1812,13 +1812,16 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
if (err)
threads->err_thread = thread;
old_errno = errno;
do {
while (--thread >= 0) {
close(FD(evsel, cpu, thread));
if (FD(evsel, cpu, thread) >= 0)
close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1;
}
thread = nthreads;
} while (--cpu >= 0);
errno = old_errno;
return err;
}

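Two details make the evsel__open() error path above robust: it only close()s file descriptors that were actually opened (skipping the -1 placeholders), and it saves errno before the close() loop and restores it afterwards, since close() can clobber it. A small self-contained sketch of that idiom, with dup(0) standing in for perf_event_open():

#include <errno.h>
#include <unistd.h>

static int open_all(int *fds, int n)
{
	int i, old_errno;

	for (i = 0; i < n; i++)
		fds[i] = -1;
	for (i = 0; i < n; i++) {
		fds[i] = dup(0);
		if (fds[i] < 0)
			goto out_close;
	}
	return 0;

out_close:
	/* close() may overwrite errno, so preserve the real cause */
	old_errno = errno;
	while (--i >= 0) {
		/* Only close fds that were actually opened */
		if (fds[i] >= 0)
			close(fds[i]);
		fds[i] = -1;
	}
	errno = old_errno;
	return -old_errno;
}

int main(void)
{
	int fds[4];

	return open_all(fds, 4) ? 1 : 0;
}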

@ -52,10 +52,6 @@ enum perf_header_version {
PERF_HEADER_VERSION_2,
};
enum perf_dir_version {
PERF_DIR_VERSION = 1,
};
struct perf_file_section {
u64 offset;
u64 size;


@ -18,6 +18,7 @@
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
@ -80,6 +81,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
int symlen;
u16 len;
if (h->block_info)
return;
/*
* +4 accounts for '[x] ' priv level info
* +2 accounts for 0x prefix on raw addresses
@ -755,6 +758,10 @@ struct hist_entry *hists__add_entry_block(struct hists *hists,
struct hist_entry entry = {
.block_info = block_info,
.hists = hists,
.ms = {
.map = al->map,
.sym = al->sym,
},
}, *he = hists__findnew_entry(hists, &entry, al, false);
return he;
@ -2569,7 +2576,8 @@ int hists__unlink(struct hists *hists)
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
struct perf_sample *sample, bool nonany_branch_mode)
struct perf_sample *sample, bool nonany_branch_mode,
u64 *total_cycles)
{
struct branch_info *bi;
@ -2596,6 +2604,9 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
nonany_branch_mode ? NULL : prev,
bi[i].flags.cycles);
prev = &bi[i].to;
if (total_cycles)
*total_cycles += bi[i].flags.cycles;
}
free(bi);
}


@ -527,7 +527,8 @@ unsigned int hists__sort_list_width(struct hists *hists);
unsigned int hists__overhead_width(struct hists *hists);
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
struct perf_sample *sample, bool nonany_branch_mode);
struct perf_sample *sample, bool nonany_branch_mode,
u64 *total_cycles);
struct option;
int parse_filter_percentage(const struct option *opt, const char *arg, int unset);


@ -418,10 +418,9 @@ void llvm__dump_obj(const char *path, void *obj_buf, size_t size)
goto out;
}
pr_info("LLVM: dumping %s\n", obj_path);
pr_debug("LLVM: dumping %s\n", obj_path);
if (fwrite(obj_buf, size, 1, fp) != 1)
pr_warning("WARNING: failed to write to file '%s': %s, skip object dumping\n",
obj_path, strerror(errno));
pr_debug("WARNING: failed to write to file '%s': %s, skip object dumping\n", obj_path, strerror(errno));
fclose(fp);
out:
free(obj_path);


@ -42,6 +42,11 @@
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
static struct dso *machine__kernel_dso(struct machine *machine)
{
return machine->vmlinux_map->dso;
}
static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
@ -861,7 +866,7 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
int i;
size_t printed = 0;
struct dso *kdso = machine__kernel_map(machine)->dso;
struct dso *kdso = machine__kernel_dso(machine);
if (kdso->has_build_id) {
char filename[PATH_MAX];
@ -1057,7 +1062,7 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine,
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
for (map = maps__first(maps); map; map = map__next(map)) {
maps__for_each_entry(maps, map) {
struct kmap *kmap = __map__kmap(map);
struct map *dest_map;
@ -1543,8 +1548,7 @@ static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
static int machine__process_extra_kernel_map(struct machine *machine,
union perf_event *event)
{
struct map *kernel_map = machine__kernel_map(machine);
struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
struct dso *kernel = machine__kernel_dso(machine);
struct extra_kernel_map xm = {
.start = event->mmap.start,
.end = event->mmap.start + event->mmap.len,


@ -288,7 +288,7 @@ bool map__has_symbols(const struct map *map)
static void map__exit(struct map *map)
{
BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
BUG_ON(refcount_read(&map->refcnt) != 0);
dso__zput(map->dso);
}
@ -594,28 +594,20 @@ void map_groups__insert(struct map_groups *mg, struct map *map)
static void __maps__purge(struct maps *maps)
{
struct rb_root *root = &maps->entries;
struct rb_node *next = rb_first(root);
struct map *pos, *next;
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node);
next = rb_next(&pos->rb_node);
rb_erase_init(&pos->rb_node, root);
maps__for_each_entry_safe(maps, pos, next) {
rb_erase_init(&pos->rb_node, &maps->entries);
map__put(pos);
}
}
static void __maps__purge_names(struct maps *maps)
{
struct rb_root *root = &maps->names;
struct rb_node *next = rb_first(root);
struct map *pos, *next;
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node_name);
next = rb_next(&pos->rb_node_name);
rb_erase_init(&pos->rb_node_name, root);
maps__for_each_entry_by_name_safe(maps, pos, next) {
rb_erase_init(&pos->rb_node_name, &maps->names);
map__put(pos);
}
}
@ -687,13 +679,11 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
struct map **mapp)
{
struct symbol *sym;
struct rb_node *nd;
struct map *pos;
down_read(&maps->lock);
for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
maps__for_each_entry(maps, pos) {
sym = map__find_symbol_by_name(pos, name);
if (sym == NULL)
@ -739,12 +729,11 @@ int map_groups__find_ams(struct addr_map_symbol *ams)
static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
size_t printed = 0;
struct rb_node *nd;
struct map *pos;
down_read(&maps->lock);
for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
maps__for_each_entry(maps, pos) {
printed += fprintf(fp, "Map:");
printed += map__fprintf(pos, fp);
if (verbose > 2) {
@ -889,7 +878,7 @@ int map_groups__clone(struct thread *thread, struct map_groups *parent)
down_read(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
maps__for_each_entry(maps, map) {
struct map *new = map__clone(map);
if (new == NULL)
goto out_unlock;
@ -1007,7 +996,7 @@ struct map *maps__first(struct maps *maps)
return NULL;
}
struct map *map__next(struct map *map)
static struct map *__map__next(struct map *map)
{
struct rb_node *next = rb_next(&map->rb_node);
@ -1016,6 +1005,34 @@ struct map *map__next(struct map *map)
return NULL;
}
struct map *map__next(struct map *map)
{
return map ? __map__next(map) : NULL;
}
struct map *maps__first_by_name(struct maps *maps)
{
struct rb_node *first = rb_first(&maps->names);
if (first)
return rb_entry(first, struct map, rb_node_name);
return NULL;
}
static struct map *__map__next_by_name(struct map *map)
{
struct rb_node *next = rb_next(&map->rb_node_name);
if (next)
return rb_entry(next, struct map, rb_node_name);
return NULL;
}
struct map *map__next_by_name(struct map *map)
{
return map ? __map__next_by_name(map) : NULL;
}
struct kmap *__map__kmap(struct map *map)
{
if (!map->dso || !map->dso->kernel)


@ -25,7 +25,22 @@ void maps__remove(struct maps *maps, struct map *map);
struct map *maps__find(struct maps *maps, u64 addr);
struct map *maps__first(struct maps *maps);
struct map *map__next(struct map *map);
#define maps__for_each_entry(maps, map) \
for (map = maps__first(maps); map; map = map__next(map))
#define maps__for_each_entry_safe(maps, map, next) \
for (map = maps__first(maps), next = map__next(map); map; map = next, next = map__next(map))
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
struct map *maps__first_by_name(struct maps *maps);
struct map *map__next_by_name(struct map *map);
#define maps__for_each_entry_by_name(maps, map) \
for (map = maps__first_by_name(maps); map; map = map__next_by_name(map))
#define maps__for_each_entry_by_name_safe(maps, map, next) \
for (map = maps__first_by_name(maps), next = map__next_by_name(map); map; map = next, next = map__next_by_name(map))
struct map_groups {
struct maps maps;
@ -74,12 +89,11 @@ static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
return maps__find(&mg->maps, addr);
}
struct map *map_groups__first(struct map_groups *mg);
#define map_groups__for_each_entry(mg, map) \
for (map = maps__first(&mg->maps); map; map = map__next(map))
static inline struct map *map_groups__next(struct map *map)
{
return map__next(map);
}
#define map_groups__for_each_entry_safe(mg, map, next) \
for (map = maps__first(&mg->maps), next = map__next(map); map; map = next, next = map__next(map))
struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);

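The maps__for_each_entry*() macros above hide the rbtree walk; the _safe variants fetch the next entry up front so the current one can be removed and freed mid-walk, which is also why map__next() was made NULL-safe. A standalone sketch of the two iterator shapes over a singly linked list (hypothetical code, not the perf macros):

#include <stdio.h>
#include <stdlib.h>

struct map { int start; struct map *next; };

#define maps_for_each(head, m) \
	for (m = (head); m; m = m->next)

/* next-of-NULL must stay NULL, exactly like the NULL-safe map__next() */
#define maps_for_each_safe(head, m, n) \
	for (m = (head), n = m ? m->next : NULL; m; \
	     m = n, n = m ? m->next : NULL)

int main(void)
{
	struct map *head = NULL, *m, *n;
	int i;

	for (i = 3; i > 0; i--) {	/* build 1 -> 2 -> 3 */
		m = malloc(sizeof(*m));
		if (!m)
			return 1;
		m->start = i;
		m->next = head;
		head = m;
	}

	maps_for_each(head, m)
		printf("map at %d\n", m->start);

	maps_for_each_safe(head, m, n)	/* current entry may be freed */
		free(m);
	return 0;
}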

@ -182,6 +182,20 @@ static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
#define MAX_EVENT_LENGTH 512
void parse_events__handle_error(struct parse_events_error *err, int idx,
char *str, char *help)
{
if (WARN(!str, "WARNING: failed to provide error string\n")) {
free(help);
return;
}
WARN_ONCE(err->str, "WARNING: multiple event parsing errors\n");
err->idx = idx;
free(err->str);
err->str = str;
free(err->help);
err->help = help;
}
struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
@ -932,11 +946,11 @@ static int check_type_val(struct parse_events_term *term,
return 0;
if (err) {
err->idx = term->err_val;
if (type == PARSE_EVENTS__TERM_TYPE_NUM)
err->str = strdup("expected numeric value");
else
err->str = strdup("expected string value");
parse_events__handle_error(err, term->err_val,
type == PARSE_EVENTS__TERM_TYPE_NUM
? strdup("expected numeric value")
: strdup("expected string value"),
NULL);
}
return -EINVAL;
}
@ -972,8 +986,11 @@ static bool config_term_shrinked;
static bool
config_term_avail(int term_type, struct parse_events_error *err)
{
char *err_str;
if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
err->str = strdup("Invalid term_type");
parse_events__handle_error(err, -1,
strdup("Invalid term_type"), NULL);
return false;
}
if (!config_term_shrinked)
@ -992,9 +1009,9 @@ config_term_avail(int term_type, struct parse_events_error *err)
return false;
/* term_type is validated so indexing is safe */
if (asprintf(&err->str, "'%s' is not usable in 'perf stat'",
config_term_names[term_type]) < 0)
err->str = NULL;
if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
config_term_names[term_type]) >= 0)
parse_events__handle_error(err, -1, err_str, NULL);
return false;
}
}
@ -1036,17 +1053,20 @@ do { \
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
CHECK_TYPE_VAL(STR);
if (strcmp(term->val.str, "no") &&
parse_branch_str(term->val.str, &attr->branch_sample_type)) {
err->str = strdup("invalid branch sample type");
err->idx = term->err_val;
parse_branch_str(term->val.str,
&attr->branch_sample_type)) {
parse_events__handle_error(err, term->err_val,
strdup("invalid branch sample type"),
NULL);
return -EINVAL;
}
break;
case PARSE_EVENTS__TERM_TYPE_TIME:
CHECK_TYPE_VAL(NUM);
if (term->val.num > 1) {
err->str = strdup("expected 0 or 1");
err->idx = term->err_val;
parse_events__handle_error(err, term->err_val,
strdup("expected 0 or 1"),
NULL);
return -EINVAL;
}
break;
@ -1080,8 +1100,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_PERCORE:
CHECK_TYPE_VAL(NUM);
if ((unsigned int)term->val.num > 1) {
err->str = strdup("expected 0 or 1");
err->idx = term->err_val;
parse_events__handle_error(err, term->err_val,
strdup("expected 0 or 1"),
NULL);
return -EINVAL;
}
break;
@ -1089,9 +1110,9 @@ do { \
CHECK_TYPE_VAL(NUM);
break;
default:
err->str = strdup("unknown term");
err->idx = term->err_term;
err->help = parse_events_formats_error_string(NULL);
parse_events__handle_error(err, term->err_term,
strdup("unknown term"),
parse_events_formats_error_string(NULL));
return -EINVAL;
}
@ -1142,9 +1163,9 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
return config_term_common(attr, term, err);
default:
if (err) {
err->idx = term->err_term;
err->str = strdup("unknown term");
err->help = strdup("valid terms: call-graph,stack-size\n");
parse_events__handle_error(err, term->err_term,
strdup("unknown term"),
strdup("valid terms: call-graph,stack-size\n"));
}
return -EINVAL;
}
@ -1323,10 +1344,12 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
pmu = perf_pmu__find(name);
if (!pmu) {
if (asprintf(&err->str,
char *err_str;
if (asprintf(&err_str,
"Cannot find PMU `%s'. Missing kernel support?",
name) < 0)
err->str = NULL;
name) >= 0)
parse_events__handle_error(err, -1, err_str, NULL);
return -EINVAL;
}
@ -1365,8 +1388,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
if (perf_pmu__config(pmu, &attr, head_config, parse_state->error))
if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
struct perf_evsel_config_term *pos, *tmp;
list_for_each_entry_safe(pos, tmp, &config_terms, list) {
list_del_init(&pos->list);
free(pos);
}
return -EINVAL;
}
evsel = __add_event(list, &parse_state->idx, &attr,
get_config_name(head_config), pmu,
@ -1389,7 +1419,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
char *str, struct list_head **listp)
{
struct list_head *head;
struct parse_events_term *term;
struct list_head *list;
struct perf_pmu *pmu = NULL;
@ -1406,19 +1435,30 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
list_for_each_entry(alias, &pmu->aliases, list) {
if (!strcasecmp(alias->name, str)) {
struct list_head *head;
char *config;
head = malloc(sizeof(struct list_head));
if (!head)
return -1;
INIT_LIST_HEAD(head);
if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
str, 1, false, &str, NULL) < 0)
config = strdup(str);
if (!config)
return -1;
if (parse_events_term__num(&term,
PARSE_EVENTS__TERM_TYPE_USER,
config, 1, false, &config,
NULL) < 0) {
free(list);
free(config);
return -1;
}
list_add_tail(&term->list, head);
if (!parse_events_add_pmu(parse_state, list,
pmu->name, head,
true, true)) {
pr_debug("%s -> %s/%s/\n", str,
pr_debug("%s -> %s/%s/\n", config,
pmu->name, alias->str);
ok++;
}
@ -1427,8 +1467,10 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
}
}
}
if (!ok)
if (!ok) {
free(list);
return -1;
}
*listp = list;
return 0;
}
@ -1927,15 +1969,20 @@ int parse_events(struct evlist *evlist, const char *str,
ret = parse_events__scanner(str, &parse_state, PE_START_EVENTS);
perf_pmu__parse_cleanup();
if (!ret && list_empty(&parse_state.list)) {
WARN_ONCE(true, "WARNING: event parser found nothing\n");
return -1;
}
/*
* Add list to the evlist even with errors to allow callers to clean up.
*/
perf_evlist__splice_list_tail(evlist, &parse_state.list);
if (!ret) {
struct evsel *last;
if (list_empty(&parse_state.list)) {
WARN_ONCE(true, "WARNING: event parser found nothing\n");
return -1;
}
perf_evlist__splice_list_tail(evlist, &parse_state.list);
evlist->nr_groups += parse_state.nr_groups;
last = evlist__last(evlist);
last->cmdline_group_boundary = true;
@ -2718,30 +2765,63 @@ int parse_events_term__sym_hw(struct parse_events_term **term,
char *config, unsigned idx)
{
struct event_symbol *sym;
char *str;
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_STR,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
.config = config ?: (char *) "event",
.config = config,
};
if (!temp.config) {
temp.config = strdup("event");
if (!temp.config)
return -ENOMEM;
}
BUG_ON(idx >= PERF_COUNT_HW_MAX);
sym = &event_symbols_hw[idx];
return new_term(term, &temp, (char *) sym->symbol, 0);
str = strdup(sym->symbol);
if (!str)
return -ENOMEM;
return new_term(term, &temp, str, 0);
}
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term)
{
char *str;
struct parse_events_term temp = {
.type_val = term->type_val,
.type_term = term->type_term,
.config = term->config,
.config = NULL,
.err_term = term->err_term,
.err_val = term->err_val,
};
return new_term(new, &temp, term->val.str, term->val.num);
if (term->config) {
temp.config = strdup(term->config);
if (!temp.config)
return -ENOMEM;
}
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
return new_term(new, &temp, NULL, term->val.num);
str = strdup(term->val.str);
if (!str)
return -ENOMEM;
return new_term(new, &temp, str, 0);
}
void parse_events_term__delete(struct parse_events_term *term)
{
if (term->array.nr_ranges)
zfree(&term->array.ranges);
if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
zfree(&term->val.str);
zfree(&term->config);
free(term);
}
int parse_events_copy_term_list(struct list_head *old,
@ -2774,10 +2854,8 @@ void parse_events_terms__purge(struct list_head *terms)
struct parse_events_term *term, *h;
list_for_each_entry_safe(term, h, terms, list) {
if (term->array.nr_ranges)
zfree(&term->array.ranges);
list_del_init(&term->list);
free(term);
parse_events_term__delete(term);
}
}
@ -2797,13 +2875,10 @@ void parse_events__clear_array(struct parse_events_array *a)
void parse_events_evlist_error(struct parse_events_state *parse_state,
int idx, const char *str)
{
struct parse_events_error *err = parse_state->error;
if (!err)
if (!parse_state->error)
return;
err->idx = idx;
err->str = strdup(str);
WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
parse_events__handle_error(parse_state->error, idx, strdup(str), NULL);
}
static void config_terms_list(char *buf, size_t buf_sz)


@ -124,6 +124,8 @@ struct parse_events_state {
struct list_head *terms;
};
void parse_events__handle_error(struct parse_events_error *err, int idx,
char *str, char *help);
void parse_events__shrink_config_terms(void);
int parse_events__is_hardcoded_term(struct parse_events_term *term);
int parse_events_term__num(struct parse_events_term **term,
@ -137,6 +139,7 @@ int parse_events_term__sym_hw(struct parse_events_term **term,
char *config, unsigned idx);
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term);
void parse_events_term__delete(struct parse_events_term *term);
void parse_events_terms__delete(struct list_head *terms);
void parse_events_terms__purge(struct list_head *terms);
void parse_events__clear_array(struct parse_events_array *a);


@ -12,6 +12,7 @@
#include <stdio.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include "pmu.h"
#include "evsel.h"
#include "parse-events.h"
@ -25,12 +26,28 @@ do { \
YYABORT; \
} while (0)
#define ALLOC_LIST(list) \
do { \
list = malloc(sizeof(*list)); \
ABORT_ON(!list); \
INIT_LIST_HEAD(list); \
} while (0)
static struct list_head* alloc_list()
{
struct list_head *list;
list = malloc(sizeof(*list));
if (!list)
return NULL;
INIT_LIST_HEAD(list);
return list;
}
static void free_list_evsel(struct list_head* list_evsel)
{
struct evsel *evsel, *tmp;
list_for_each_entry_safe(evsel, tmp, list_evsel, core.node) {
list_del_init(&evsel->core.node);
perf_evsel__delete(evsel);
}
free(list_evsel);
}
static void inc_group_count(struct list_head *list,
struct parse_events_state *parse_state)
@ -61,6 +78,7 @@ static void inc_group_count(struct list_head *list,
%type <num> PE_VALUE_SYM_TOOL
%type <num> PE_RAW
%type <num> PE_TERM
%type <num> value_sym
%type <str> PE_NAME
%type <str> PE_BPF_OBJECT
%type <str> PE_BPF_SOURCE
@ -71,37 +89,43 @@ static void inc_group_count(struct list_head *list,
%type <str> PE_EVENT_NAME
%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
%type <str> PE_DRV_CFG_TERM
%type <num> value_sym
%type <head> event_config
%type <head> opt_event_config
%type <head> opt_pmu_config
%destructor { free ($$); } <str>
%type <term> event_term
%type <head> event_pmu
%type <head> event_legacy_symbol
%type <head> event_legacy_cache
%type <head> event_legacy_mem
%type <head> event_legacy_tracepoint
%destructor { parse_events_term__delete ($$); } <term>
%type <list_terms> event_config
%type <list_terms> opt_event_config
%type <list_terms> opt_pmu_config
%destructor { parse_events_terms__delete ($$); } <list_terms>
%type <list_evsel> event_pmu
%type <list_evsel> event_legacy_symbol
%type <list_evsel> event_legacy_cache
%type <list_evsel> event_legacy_mem
%type <list_evsel> event_legacy_tracepoint
%type <list_evsel> event_legacy_numeric
%type <list_evsel> event_legacy_raw
%type <list_evsel> event_bpf_file
%type <list_evsel> event_def
%type <list_evsel> event_mod
%type <list_evsel> event_name
%type <list_evsel> event
%type <list_evsel> events
%type <list_evsel> group_def
%type <list_evsel> group
%type <list_evsel> groups
%destructor { free_list_evsel ($$); } <list_evsel>
%type <tracepoint_name> tracepoint_name
%type <head> event_legacy_numeric
%type <head> event_legacy_raw
%type <head> event_bpf_file
%type <head> event_def
%type <head> event_mod
%type <head> event_name
%type <head> event
%type <head> events
%type <head> group_def
%type <head> group
%type <head> groups
%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
%type <array> array
%type <array> array_term
%type <array> array_terms
%destructor { free ($$.ranges); } <array>
%union
{
char *str;
u64 num;
struct list_head *head;
struct list_head *list_evsel;
struct list_head *list_terms;
struct parse_events_term *term;
struct tracepoint_name {
char *sys;
@ -120,6 +144,7 @@ start_events: groups
{
struct parse_events_state *parse_state = _parse_state;
/* frees $1 */
parse_events_update_lists($1, &parse_state->list);
}
@ -129,6 +154,7 @@ groups ',' group
struct list_head *list = $1;
struct list_head *group = $3;
/* frees $3 */
parse_events_update_lists(group, list);
$$ = list;
}
@ -138,6 +164,7 @@ groups ',' event
struct list_head *list = $1;
struct list_head *event = $3;
/* frees $3 */
parse_events_update_lists(event, list);
$$ = list;
}
@ -150,8 +177,14 @@ group:
group_def ':' PE_MODIFIER_EVENT
{
struct list_head *list = $1;
int err;
ABORT_ON(parse_events__modifier_group(list, $3));
err = parse_events__modifier_group(list, $3);
free($3);
if (err) {
free_list_evsel(list);
YYABORT;
}
$$ = list;
}
|
@ -164,6 +197,7 @@ PE_NAME '{' events '}'
inc_group_count(list, _parse_state);
parse_events__set_leader($1, list, _parse_state);
free($1);
$$ = list;
}
|
@ -182,6 +216,7 @@ events ',' event
struct list_head *event = $3;
struct list_head *list = $1;
/* frees $3 */
parse_events_update_lists(event, list);
$$ = list;
}
@ -194,13 +229,19 @@ event_mod:
event_name PE_MODIFIER_EVENT
{
struct list_head *list = $1;
int err;
/*
* Apply modifier on all events added by single event definition
* (there could be more events added for multiple tracepoint
* definitions via '*?'.
*/
ABORT_ON(parse_events__modifier_event(list, $2, false));
err = parse_events__modifier_event(list, $2, false);
free($2);
if (err) {
free_list_evsel(list);
YYABORT;
}
$$ = list;
}
|
@ -209,8 +250,14 @@ event_name
event_name:
PE_EVENT_NAME event_def
{
ABORT_ON(parse_events_name($2, $1));
int err;
err = parse_events_name($2, $1);
free($1);
if (err) {
free_list_evsel($2);
YYABORT;
}
$$ = $2;
}
|
@ -230,22 +277,33 @@ PE_NAME opt_pmu_config
{
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list, *orig_terms, *terms;
struct list_head *list = NULL, *orig_terms = NULL, *terms = NULL;
char *pattern = NULL;
#define CLEANUP_YYABORT \
do { \
parse_events_terms__delete($2); \
parse_events_terms__delete(orig_terms); \
free($1); \
free(pattern); \
YYABORT; \
} while(0)
if (parse_events_copy_term_list($2, &orig_terms))
YYABORT;
CLEANUP_YYABORT;
if (error)
error->idx = @1.first_column;
ALLOC_LIST(list);
list = alloc_list();
if (!list)
CLEANUP_YYABORT;
if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
struct perf_pmu *pmu = NULL;
int ok = 0;
char *pattern;
if (asprintf(&pattern, "%s*", $1) < 0)
YYABORT;
CLEANUP_YYABORT;
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
char *name = pmu->name;
@ -254,31 +312,32 @@ PE_NAME opt_pmu_config
strncmp($1, "uncore_", 7))
name += 7;
if (!fnmatch(pattern, name, 0)) {
if (parse_events_copy_term_list(orig_terms, &terms)) {
free(pattern);
YYABORT;
}
if (parse_events_copy_term_list(orig_terms, &terms))
CLEANUP_YYABORT;
if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false))
ok++;
parse_events_terms__delete(terms);
}
}
free(pattern);
if (!ok)
YYABORT;
CLEANUP_YYABORT;
}
parse_events_terms__delete($2);
parse_events_terms__delete(orig_terms);
free($1);
$$ = list;
#undef CLEANUP_YYABORT
}
|
PE_KERNEL_PMU_EVENT sep_dc
{
struct list_head *list;
int err;
if (parse_events_multi_pmu_add(_parse_state, $1, &list) < 0)
err = parse_events_multi_pmu_add(_parse_state, $1, &list);
free($1);
if (err < 0)
YYABORT;
$$ = list;
}
@ -289,6 +348,8 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
char pmu_name[128];
snprintf(&pmu_name, 128, "%s-%s", $1, $3);
free($1);
free($3);
if (parse_events_multi_pmu_add(_parse_state, pmu_name, &list) < 0)
YYABORT;
$$ = list;
@ -305,10 +366,16 @@ value_sym '/' event_config '/'
struct list_head *list;
int type = $1 >> 16;
int config = $1 & 255;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, $3));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_numeric(_parse_state, list, type, config, $3);
parse_events_terms__delete($3);
if (err) {
free_list_evsel(list);
YYABORT;
}
$$ = list;
}
|
@ -318,7 +385,8 @@ value_sym sep_slash_slash_dc
int type = $1 >> 16;
int config = $1 & 255;
ALLOC_LIST(list);
list = alloc_list();
ABORT_ON(!list);
ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, NULL));
$$ = list;
}
@ -327,7 +395,8 @@ PE_VALUE_SYM_TOOL sep_slash_slash_dc
{
struct list_head *list;
ALLOC_LIST(list);
list = alloc_list();
ABORT_ON(!list);
ABORT_ON(parse_events_add_tool(_parse_state, list, $1));
$$ = list;
}
@ -338,10 +407,19 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_e
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6);
parse_events_terms__delete($6);
free($1);
free($3);
free($5);
if (err) {
free_list_evsel(list);
YYABORT;
}
$$ = list;
}
|
@ -350,10 +428,18 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT opt_event_config
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4);
parse_events_terms__delete($4);
free($1);
free($3);
if (err) {
free_list_evsel(list);
YYABORT;
}
$$ = list;
}
|
@ -362,10 +448,17 @@ PE_NAME_CACHE_TYPE opt_event_config
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2);
parse_events_terms__delete($2);
free($1);
if (err) {
free_list_evsel(list);
YYABORT;
}
$$ = list;
}
@ -374,10 +467,17 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, $6, $4));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, $6, $4);
free($6);
if (err) {
free(list);
YYABORT;
}
$$ = list;
}
|
@ -386,9 +486,13 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, NULL, $4));
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, NULL, $4)) {
free(list);
YYABORT;
}
$$ = list;
}
|
@ -396,10 +500,17 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, $4, 0));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, $4, 0);
free($4);
if (err) {
free(list);
YYABORT;
}
$$ = list;
}
|
@ -408,9 +519,13 @@ PE_PREFIX_MEM PE_VALUE sep_dc
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, NULL, 0));
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
(void *) $2, NULL, 0)) {
free(list);
YYABORT;
}
$$ = list;
}
@ -420,28 +535,35 @@ tracepoint_name opt_event_config
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
int err;
ALLOC_LIST(list);
list = alloc_list();
ABORT_ON(!list);
if (error)
error->idx = @1.first_column;
if (parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event,
error, $2))
return -1;
err = parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event,
error, $2);
parse_events_terms__delete($2);
free($1.sys);
free($1.event);
if (err) {
free(list);
return -1;
}
$$ = list;
}
tracepoint_name:
PE_NAME '-' PE_NAME ':' PE_NAME
{
char sys_name[128];
struct tracepoint_name tracepoint;
snprintf(&sys_name, 128, "%s-%s", $1, $3);
tracepoint.sys = &sys_name;
ABORT_ON(asprintf(&tracepoint.sys, "%s-%s", $1, $3) < 0);
tracepoint.event = $5;
free($1);
free($3);
$$ = tracepoint;
}
|
@ -456,10 +578,16 @@ event_legacy_numeric:
PE_VALUE ':' PE_VALUE opt_event_config
{
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(_parse_state, list, (u32)$1, $3, $4));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_numeric(_parse_state, list, (u32)$1, $3, $4);
parse_events_terms__delete($4);
if (err) {
free(list);
YYABORT;
}
$$ = list;
}
@ -467,10 +595,16 @@ event_legacy_raw:
PE_RAW opt_event_config
{
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(_parse_state, list, PERF_TYPE_RAW, $1, $2));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_numeric(_parse_state, list, PERF_TYPE_RAW, $1, $2);
parse_events_terms__delete($2);
if (err) {
free(list);
YYABORT;
}
$$ = list;
}
@ -479,20 +613,33 @@ PE_BPF_OBJECT opt_event_config
{
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_load_bpf(parse_state, list, $1, false, $2));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_load_bpf(parse_state, list, $1, false, $2);
parse_events_terms__delete($2);
free($1);
if (err) {
free(list);
YYABORT;
}
$$ = list;
}
|
PE_BPF_SOURCE opt_event_config
{
struct list_head *list;
int err;
ALLOC_LIST(list);
ABORT_ON(parse_events_load_bpf(_parse_state, list, $1, true, $2));
list = alloc_list();
ABORT_ON(!list);
err = parse_events_load_bpf(_parse_state, list, $1, true, $2);
parse_events_terms__delete($2);
if (err) {
free(list);
YYABORT;
}
$$ = list;
}
@ -525,6 +672,10 @@ opt_pmu_config:
start_terms: event_config
{
struct parse_events_state *parse_state = _parse_state;
if (parse_state->terms) {
parse_events_terms__delete ($1);
YYABORT;
}
parse_state->terms = $1;
}
@ -534,7 +685,10 @@ event_config ',' event_term
struct list_head *head = $1;
struct parse_events_term *term = $3;
ABORT_ON(!head);
if (!head) {
parse_events_term__delete(term);
YYABORT;
}
list_add_tail(&term->list, head);
$$ = $1;
}
@ -555,8 +709,12 @@ PE_NAME '=' PE_NAME
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $3, &@1, &@3));
if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $3, &@1, &@3)) {
free($1);
free($3);
YYABORT;
}
$$ = term;
}
|
@ -564,8 +722,11 @@ PE_NAME '=' PE_VALUE
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $3, false, &@1, &@3));
if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $3, false, &@1, &@3)) {
free($1);
YYABORT;
}
$$ = term;
}
|
@ -574,7 +735,10 @@ PE_NAME '=' PE_VALUE_SYM_HW
struct parse_events_term *term;
int config = $3 & 255;
ABORT_ON(parse_events_term__sym_hw(&term, $1, config));
if (parse_events_term__sym_hw(&term, $1, config)) {
free($1);
YYABORT;
}
$$ = term;
}
|
@ -582,8 +746,11 @@ PE_NAME
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, 1, true, &@1, NULL));
if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, 1, true, &@1, NULL)) {
free($1);
YYABORT;
}
$$ = term;
}
|
@ -600,7 +767,10 @@ PE_TERM '=' PE_NAME
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3));
if (parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3)) {
free($3);
YYABORT;
}
$$ = term;
}
|
@ -624,9 +794,13 @@ PE_NAME array '=' PE_NAME
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $4, &@1, &@4));
if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $4, &@1, &@4)) {
free($1);
free($4);
free($2.ranges);
YYABORT;
}
term->array = $2;
$$ = term;
}
@ -635,8 +809,12 @@ PE_NAME array '=' PE_VALUE
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $4, false, &@1, &@4));
if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $4, false, &@1, &@4)) {
free($1);
free($2.ranges);
YYABORT;
}
term->array = $2;
$$ = term;
}
@ -644,9 +822,15 @@ PE_NAME array '=' PE_VALUE
PE_DRV_CFG_TERM
{
struct parse_events_term *term;
char *config = strdup($1);
ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG,
$1, $1, &@1, NULL));
ABORT_ON(!config);
if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG,
config, $1, &@1, NULL)) {
free($1);
free(config);
YYABORT;
}
$$ = term;
}
@ -668,14 +852,12 @@ array_terms ',' array_term
struct parse_events_array new_array;
new_array.nr_ranges = $1.nr_ranges + $3.nr_ranges;
new_array.ranges = malloc(sizeof(new_array.ranges[0]) *
new_array.nr_ranges);
new_array.ranges = realloc($1.ranges,
sizeof(new_array.ranges[0]) *
new_array.nr_ranges);
ABORT_ON(!new_array.ranges);
memcpy(&new_array.ranges[0], $1.ranges,
$1.nr_ranges * sizeof(new_array.ranges[0]));
memcpy(&new_array.ranges[$1.nr_ranges], $3.ranges,
$3.nr_ranges * sizeof(new_array.ranges[0]));
free($1.ranges);
free($3.ranges);
$$ = new_array;
}

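The array_terms change above merges two range arrays by growing the left one with realloc() and appending the right one with a single memcpy(), instead of allocating a third buffer and copying both. A standalone sketch of that merge (hypothetical code; like the grammar's ABORT_ON(), it simply bails when allocation fails):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int *merge(int *a, size_t na, int *b, size_t nb)
{
	/* Grow 'a' in place instead of allocating a third buffer */
	int *r = realloc(a, (na + nb) * sizeof(*r));

	if (!r)
		return NULL;
	memcpy(r + na, b, nb * sizeof(*r));
	free(b);
	return r;
}

int main(void)
{
	int *a = malloc(2 * sizeof(*a));
	int *b = malloc(2 * sizeof(*b));
	size_t i;

	if (!a || !b)
		return 1;
	a[0] = 1; a[1] = 2;
	b[0] = 3; b[1] = 4;

	a = merge(a, 2, b, 2);
	for (i = 0; a && i < 4; i++)
		printf("%d\n", a[i]);	/* 1 2 3 4 */
	free(a);
	return 0;
}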

@ -1050,9 +1050,9 @@ static int pmu_config_term(struct list_head *formats,
if (err) {
char *pmu_term = pmu_formats_string(formats);
err->idx = term->err_term;
err->str = strdup("unknown term");
err->help = parse_events_formats_error_string(pmu_term);
parse_events__handle_error(err, term->err_term,
strdup("unknown term"),
parse_events_formats_error_string(pmu_term));
free(pmu_term);
}
return -EINVAL;
@ -1080,8 +1080,9 @@ static int pmu_config_term(struct list_head *formats,
if (term->no_value &&
bitmap_weight(format->bits, PERF_PMU_FORMAT_BITS) > 1) {
if (err) {
err->idx = term->err_val;
err->str = strdup("no value assigned for term");
parse_events__handle_error(err, term->err_val,
strdup("no value assigned for term"),
NULL);
}
return -EINVAL;
}
@ -1094,8 +1095,9 @@ static int pmu_config_term(struct list_head *formats,
term->config, term->val.str);
}
if (err) {
err->idx = term->err_val;
err->str = strdup("expected numeric value");
parse_events__handle_error(err, term->err_val,
strdup("expected numeric value"),
NULL);
}
return -EINVAL;
}
@ -1108,11 +1110,15 @@ static int pmu_config_term(struct list_head *formats,
max_val = pmu_format_max_value(format->bits);
if (val > max_val) {
if (err) {
err->idx = term->err_val;
if (asprintf(&err->str,
"value too big for format, maximum is %llu",
(unsigned long long)max_val) < 0)
err->str = strdup("value too big for format");
char *err_str;
parse_events__handle_error(err, term->err_val,
asprintf(&err_str,
"value too big for format, maximum is %llu",
(unsigned long long)max_val) < 0
? strdup("value too big for format")
: err_str,
NULL);
return -EINVAL;
}
/*
@ -1254,7 +1260,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
info->metric_name = alias->metric_name;
list_del_init(&term->list);
free(term);
parse_events_term__delete(term);
}
/*


@ -153,7 +153,7 @@ static struct map *kernel_get_module_map(const char *module)
return map__get(pos);
}
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
maps__for_each_entry(maps, pos) {
/* short_name is "[module]" */
if (strncmp(pos->dso->short_name + 1, module,
pos->dso->short_name_len - 2) == 0 &&


@ -604,38 +604,26 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
const char *function,
struct probe_trace_point *tp)
{
Dwarf_Addr eaddr, highaddr;
Dwarf_Addr eaddr;
GElf_Sym sym;
const char *symbol;
/* Verify the address is correct */
if (dwarf_entrypc(sp_die, &eaddr) != 0) {
pr_warning("Failed to get entry address of %s\n",
dwarf_diename(sp_die));
return -ENOENT;
}
if (dwarf_highpc(sp_die, &highaddr) != 0) {
pr_warning("Failed to get end address of %s\n",
dwarf_diename(sp_die));
return -ENOENT;
}
if (paddr > highaddr) {
pr_warning("Offset specified is greater than size of %s\n",
if (!dwarf_haspc(sp_die, paddr)) {
pr_warning("Specified offset is out of %s\n",
dwarf_diename(sp_die));
return -EINVAL;
}
symbol = dwarf_diename(sp_die);
/* Try to get actual symbol name from symtab */
symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
if (!symbol) {
/* Try to get the symbol name from symtab */
symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
if (!symbol) {
pr_warning("Failed to find symbol at 0x%lx\n",
(unsigned long)paddr);
return -ENOENT;
}
eaddr = sym.st_value;
pr_warning("Failed to find symbol at 0x%lx\n",
(unsigned long)paddr);
return -ENOENT;
}
eaddr = sym.st_value;
tp->offset = (unsigned long)(paddr - eaddr);
tp->address = (unsigned long)paddr;
tp->symbol = strdup(symbol);
@ -756,6 +744,16 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
return 0;
}
/* Return innermost DIE */
static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data)
{
struct find_scope_param *fsp = data;
memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
fsp->found = true;
return 1;
}
/* Find an appropriate scope fits to given conditions */
static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
{
@ -767,8 +765,13 @@ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
.die_mem = die_mem,
.found = false,
};
int ret;
cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb,
&fsp);
if (!ret && !fsp.found)
cu_walk_functions_at(&pf->cu_die, pf->addr,
find_inner_scope_cb, &fsp);
return fsp.found ? die_mem : NULL;
}
@ -942,7 +945,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
ret = find_probe_point_lazy(in_die, pf);
else {
/* Get probe address */
if (dwarf_entrypc(in_die, &addr) != 0) {
if (die_entrypc(in_die, &addr) != 0) {
pr_warning("Failed to get entry address of %s.\n",
dwarf_diename(in_die));
return -ENOENT;
@ -994,7 +997,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
param->retval = find_probe_point_by_line(pf);
} else if (die_is_func_instance(sp_die)) {
/* Instances always have the entry address */
dwarf_entrypc(sp_die, &pf->addr);
die_entrypc(sp_die, &pf->addr);
/* But in some case the entry address is 0 */
if (pf->addr == 0) {
pr_debug("%s has no entry PC. Skipped\n",
@ -1425,6 +1428,18 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
return DIE_FIND_CB_END;
}
static bool available_var_finder_overlap(struct available_var_finder *af)
{
int i;
for (i = 0; i < af->nvls; i++) {
if (af->pf.addr == af->vls[i].point.address)
return true;
}
return false;
}
/* Add a found vars into available variables list */
static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
{
@ -1435,6 +1450,14 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
Dwarf_Die die_mem;
int ret;
/*
* For some reason (e.g. different column assigned to same address),
* this callback can be called with the address which already passed.
* Ignore it first.
*/
if (available_var_finder_overlap(af))
return 0;
/* Check number of tevs */
if (af->nvls == af->max_vls) {
pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
@ -1578,7 +1601,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
/* Get function entry information */
func = basefunc = dwarf_diename(&spdie);
if (!func ||
dwarf_entrypc(&spdie, &baseaddr) != 0 ||
die_entrypc(&spdie, &baseaddr) != 0 ||
dwarf_decl_line(&spdie, &baseline) != 0) {
lineno = 0;
goto post;
@ -1595,7 +1618,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
&indie)) {
/* There is an inline function */
if (dwarf_entrypc(&indie, &_addr) == 0 &&
if (die_entrypc(&indie, &_addr) == 0 &&
_addr == addr) {
/*
* addr is at an inline function entry.

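add_available_vars() above can be invoked more than once for the same address, e.g. when different columns are assigned to one address, so available_var_finder_overlap() deduplicates by probe-point address before appending. A standalone sketch of that guard (hypothetical code):

#include <stdbool.h>
#include <stdio.h>

struct probe { unsigned long address; };

static bool seen(const struct probe *vls, int nvls, unsigned long addr)
{
	int i;

	for (i = 0; i < nvls; i++)
		if (vls[i].address == addr)
			return true;
	return false;
}

int main(void)
{
	struct probe vls[8];
	unsigned long addrs[] = { 0x100, 0x100, 0x200 };
	int i, nvls = 0;

	for (i = 0; i < 3; i++) {
		if (seen(vls, nvls, addrs[i]))
			continue;	/* address already collected */
		vls[nvls++].address = addrs[i];
	}
	printf("%d unique probe points\n", nvls);	/* 2 */
	return 0;
}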

@ -44,6 +44,7 @@ struct record_opts {
bool strict_freq;
bool sample_id;
bool no_bpf_event;
bool kcore;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;


@ -227,9 +227,13 @@ struct perf_session *perf_session__new(struct perf_data *data,
/* Open the directory data. */
if (data->is_dir) {
ret = perf_data__open_dir(data);
if (ret)
goto out_delete;
if (ret)
goto out_delete;
}
if (!symbol_conf.kallsyms_name &&
!symbol_conf.vmlinux_name)
symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
}
} else {
session->machines.host.env = &perf_env;


@ -100,6 +100,15 @@ static void aggr_printout(struct perf_stat_config *config,
nr,
config->csv_sep);
break;
case AGGR_NODE:
fprintf(config->output, "N%*d%s%*d%s",
config->csv_output ? 0 : -5,
id,
config->csv_sep,
config->csv_output ? 0 : 4,
nr,
config->csv_sep);
break;
case AGGR_NONE:
if (evsel->percore) {
fprintf(config->output, "S%d-D%d-C%*d%s",
@ -965,6 +974,11 @@ static void print_interval(struct perf_stat_config *config,
if ((num_print_interval == 0 && !config->csv_output) || config->interval_clear) {
switch (config->aggr_mode) {
case AGGR_NODE:
fprintf(output, "# time node cpus");
if (!metric_only)
fprintf(output, " counts %*s events\n", unit_width, "unit");
break;
case AGGR_SOCKET:
fprintf(output, "# time socket cpus");
if (!metric_only)
@ -1188,6 +1202,7 @@ perf_evlist__print_counters(struct evlist *evlist,
case AGGR_CORE:
case AGGR_DIE:
case AGGR_SOCKET:
case AGGR_NODE:
print_aggr(config, evlist, prefix);
break;
case AGGR_THREAD:


@ -299,6 +299,7 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
case AGGR_CORE:
case AGGR_DIE:
case AGGR_SOCKET:
case AGGR_NODE:
case AGGR_NONE:
if (!evsel->snapshot)
perf_evsel__compute_deltas(evsel, cpu, thread, count);


@ -47,6 +47,7 @@ enum aggr_mode {
AGGR_CORE,
AGGR_THREAD,
AGGR_UNSET,
AGGR_NODE,
};
enum {


@ -242,28 +242,24 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
void map_groups__fixup_end(struct map_groups *mg)
{
struct maps *maps = &mg->maps;
struct map *next, *curr;
struct map *prev = NULL, *curr;
down_write(&maps->lock);
curr = maps__first(maps);
if (curr == NULL)
goto out_unlock;
maps__for_each_entry(maps, curr) {
if (prev != NULL && !prev->end)
prev->end = curr->start;
for (next = map__next(curr); next; next = map__next(curr)) {
if (!curr->end)
curr->end = next->start;
curr = next;
prev = curr;
}
/*
* We still haven't the actual symbols, so guess the
* last map final address.
*/
if (!curr->end)
if (curr && !curr->end)
curr->end = ~0ULL;
out_unlock:
up_write(&maps->lock);
}
@ -1053,11 +1049,6 @@ int compare_proc_modules(const char *from, const char *to)
return ret;
}
struct map *map_groups__first(struct map_groups *mg)
{
return maps__first(&mg->maps);
}
static int do_validate_kcore_modules(const char *filename,
struct map_groups *kmaps)
{
@ -1069,13 +1060,10 @@ static int do_validate_kcore_modules(const char *filename,
if (err)
return err;
old_map = map_groups__first(kmaps);
while (old_map) {
struct map *next = map_groups__next(old_map);
map_groups__for_each_entry(kmaps, old_map) {
struct module_info *mi;
if (!__map__is_kmodule(old_map)) {
old_map = next;
continue;
}
@ -1085,8 +1073,6 @@ static int do_validate_kcore_modules(const char *filename,
err = -EINVAL;
goto out;
}
old_map = next;
}
out:
delete_modules(&modules);
@ -1189,9 +1175,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
struct map *old_map;
LIST_HEAD(merged);
for (old_map = map_groups__first(kmaps); old_map;
old_map = map_groups__next(old_map)) {
map_groups__for_each_entry(kmaps, old_map) {
/* no overload with this one */
if (new_map->end < old_map->start ||
new_map->start >= old_map->end)
@ -1264,7 +1248,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
{
struct map_groups *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
struct map *old_map, *new_map, *replacement_map = NULL;
struct map *old_map, *new_map, *replacement_map = NULL, *next;
struct machine *machine;
bool is_64_bit;
int err, fd;
@ -1311,10 +1295,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Remove old maps */
old_map = map_groups__first(kmaps);
while (old_map) {
struct map *next = map_groups__next(old_map);
map_groups__for_each_entry_safe(kmaps, old_map, next) {
/*
* We need to preserve eBPF maps even if they are
* covered by kcore, because we need to access
@ -1322,7 +1303,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
*/
if (old_map != map && !__map__is_bpf_prog(old_map))
map_groups__remove(kmaps, old_map);
old_map = next;
}
machine->trampolines_mapped = false;
@ -1637,7 +1617,7 @@ int dso__load(struct dso *dso, struct map *map)
goto out;
}
if (map->groups && map->groups->machine)
if (map->groups)
machine = map->groups->machine;
else
machine = NULL;
@ -2371,25 +2351,3 @@ struct mem_info *mem_info__new(void)
refcount_set(&mi->refcnt, 1);
return mi;
}
struct block_info *block_info__get(struct block_info *bi)
{
if (bi)
refcount_inc(&bi->refcnt);
return bi;
}
void block_info__put(struct block_info *bi)
{
if (bi && refcount_dec_and_test(&bi->refcnt))
free(bi);
}
struct block_info *block_info__new(void)
{
struct block_info *bi = zalloc(sizeof(*bi));
if (bi)
refcount_set(&bi->refcnt, 1);
return bi;
}

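map_groups__fixup_end() above is rewritten on top of maps__for_each_entry(): every zero-length map is closed at the following map's start, and the last map, whose symbols are not loaded yet, is extended to the highest possible address. A standalone array-based sketch of the same pass (hypothetical code):

#include <stdio.h>

struct map { unsigned long long start, end; };

static void fixup_end(struct map *maps, int n)
{
	struct map *prev = NULL, *curr = NULL;
	int i;

	for (i = 0; i < n; i++) {
		curr = &maps[i];
		if (prev && !prev->end)
			prev->end = curr->start;
		prev = curr;
	}
	/* No symbols yet, so guess the last map's final address */
	if (curr && !curr->end)
		curr->end = ~0ULL;
}

int main(void)
{
	struct map maps[] = { { 0x1000, 0 }, { 0x2000, 0 } };

	fixup_end(maps, 2);
	printf("%#llx-%#llx %#llx-%#llx\n",
	       maps[0].start, maps[0].end, maps[1].start, maps[1].end);
	return 0;	/* 0x1000-0x2000 0x2000-0xffffffffffffffff */
}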

@ -106,18 +106,6 @@ struct ref_reloc_sym {
u64 unrelocated_addr;
};
struct block_info {
struct symbol *sym;
u64 start;
u64 end;
u64 cycles;
u64 cycles_aggr;
s64 cycles_spark[NUM_SPARKS];
int num;
int num_aggr;
refcount_t refcnt;
};
struct addr_location {
struct machine *machine;
struct thread *thread;
@ -291,16 +279,4 @@ static inline void __mem_info__zput(struct mem_info **mi)
#define mem_info__zput(mi) __mem_info__zput(&mi)
struct block_info *block_info__new(void);
struct block_info *block_info__get(struct block_info *bi);
void block_info__put(struct block_info *bi);
static inline void __block_info__zput(struct block_info **bi)
{
block_info__put(*bi);
*bi = NULL;
}
#define block_info__zput(bi) __block_info__zput(&bi)
#endif /* __PERF_SYMBOL */


@ -40,6 +40,7 @@ struct symbol_conf {
raw_trace,
report_hierarchy,
report_block,
report_individual_block,
inline_name,
disable_add2line_warn;
const char *vmlinux_name,


@ -438,7 +438,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
else
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
maps__for_each_entry(maps, pos) {
size_t size;
if (!__map__is_kmodule(pos))


@ -350,7 +350,7 @@ static int __thread__prepare_access(struct thread *thread)
down_read(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
maps__for_each_entry(maps, map) {
err = unwind__prepare_access(thread->mg, map, &initialized);
if (err || initialized)
break;


@ -182,14 +182,31 @@ static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
return rmdir(path);
}
static int rm_rf_kcore_dir(const char *path)
{
char kcore_dir_path[PATH_MAX];
const char *pat[] = {
"kcore",
"kallsyms",
"modules",
NULL,
};
snprintf(kcore_dir_path, sizeof(kcore_dir_path), "%s/kcore_dir", path);
return rm_rf_depth_pat(kcore_dir_path, 0, pat);
}
int rm_rf_perf_data(const char *path)
{
const char *pat[] = {
"header",
"data",
"data.*",
NULL,
};
rm_rf_kcore_dir(path);
return rm_rf_depth_pat(path, 0, pat);
}


@ -142,9 +142,9 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
struct thread *thread)
{
enum dso_type dso_type = DSO__TYPE_UNKNOWN;
struct map *map = map_groups__first(thread->mg);
struct map *map;
for (; map ; map = map_groups__next(map)) {
map_groups__for_each_entry(thread->mg, map) {
struct dso *dso = map->dso;
if (!dso || dso->long_name[0] != '/')
continue;