perf vendor events: Update Intel jaketown
Update to v21, the metrics are based on TMA 4.4 full.

Use script at:
https://github.com/intel/event-converter-for-linux-perf/blob/master/download_and_gen.py
to download and generate the latest events and metrics. Manually copy
the jaketown files into perf and update mapfile.csv.

Tested on a non-jaketown with 'perf test':
10: PMU events :
10.1: PMU event table sanity : Ok
10.2: PMU event map aliases : Ok
10.3: Parsing of PMU event table metrics : Ok
10.4: Parsing of PMU event table metrics with fake PMUs : Ok

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.garry@huawei.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kshipra Bopardikar <kshipra.bopardikar@intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: http://lore.kernel.org/lkml/20220727220832.2865794-17-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent: 6220136831
Commit: 376d8b581b
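Since each generated file is a plain JSON array of event/metric objects (as the diff below shows), one quick way to eyeball the result beyond 'perf test' is to parse a copied file directly. A minimal sketch, assuming the metrics file landed at its usual spot in the perf tree (the path is an assumption):

    import json

    # Assumed destination of the generated jaketown metrics file.
    path = "tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json"

    with open(path) as f:
        entries = json.load(f)

    # Each metric object in the diff below carries MetricName and MetricExpr.
    for entry in entries:
        if "MetricName" in entry:
            print(entry["MetricName"], "<=", entry["MetricExpr"])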
@@ -124,7 +124,7 @@
         "MetricName": "FLOPc_SMT"
     },
     {
-        "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+        "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
         "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
         "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
         "MetricName": "ILP"
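To make the updated ILP expression concrete: cpu@UOPS_DISPATCHED.CORE\,cmask\=1@ counts core cycles in which at least one uop was dispatched, and with SMT on the per-core count is halved so the denominator matches the per-thread numerator, hence the new "per-core" wording. A sketch of the arithmetic with invented counter values (a real evaluation substitutes measured counts):

    # Evaluates like the ILP MetricExpr above; the counts are made up.
    def ilp(uops_dispatched_thread, core_cycles_ge_1_uop, smt_on):
        denom = core_cycles_ge_1_uop / 2 if smt_on else core_cycles_ge_1_uop
        return uops_dispatched_thread / denom

    print(ilp(1_000_000, 400_000, smt_on=False))  # 2.5 uops per dispatch cycle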
@@ -141,6 +141,12 @@
         "MetricGroup": "Summary;TmaL1",
         "MetricName": "Instructions"
     },
+    {
+        "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+        "MetricGroup": "Pipeline;Ret",
+        "MetricName": "Retire"
+    },
     {
         "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
         "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
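The new Retire metric uses the same cmask pattern: dividing UOPS_RETIRED.RETIRE_SLOTS by the cmask=1 count of the same event gives the average uops retired per cycle in which anything retired at all. With invented counts:

    retire_slots = 900_000      # UOPS_RETIRED.RETIRE_SLOTS
    retiring_cycles = 300_000   # same event with cmask=1
    print(retire_slots / retiring_cycles)  # 3.0 uops per retiring cycle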
@@ -163,7 +169,8 @@
         "BriefDescription": "Giga Floating Point Operations Per Second",
         "MetricExpr": "( ( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE ) / 1000000000 ) / duration_time",
         "MetricGroup": "Cor;Flops;HPC",
-        "MetricName": "GFLOPs"
+        "MetricName": "GFLOPs",
+        "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
     },
     {
         "BriefDescription": "Average Frequency Utilization relative nominal frequency",
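The GFLOPs expression weights each counter by the number of FP operations a single instruction performs: 1 for scalars, 2 for 128-bit packed double, 4 for 128-bit packed single and 256-bit packed double, 8 for 256-bit packed single. A worked sketch with invented counts:

    # Mirrors the GFLOPs MetricExpr above; counter values are made up.
    def gflops(scalar_single, scalar_double, packed_double, packed_single,
               simd256_double, simd256_single, duration_time):
        flops = (1 * (scalar_single + scalar_double)
                 + 2 * packed_double
                 + 4 * (packed_single + simd256_double)
                 + 8 * simd256_single)
        return flops / 1_000_000_000 / duration_time

    print(gflops(1e9, 0, 2e9, 0, 0, 5e8, 1.0))  # 9.0 GFLOPs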
@@ -410,8 +410,8 @@
     },
     {
         "BriefDescription": "Reference cycles when the core is not in halt state.",
-        "Counter": "Fixed counter 3",
-        "CounterHTOff": "Fixed counter 3",
+        "Counter": "Fixed counter 2",
+        "CounterHTOff": "Fixed counter 2",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
         "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
         "SampleAfterValue": "2000003",
@@ -439,8 +439,8 @@
     },
     {
         "BriefDescription": "Core cycles when the thread is not in halt state.",
-        "Counter": "Fixed counter 2",
-        "CounterHTOff": "Fixed counter 2",
+        "Counter": "Fixed counter 1",
+        "CounterHTOff": "Fixed counter 1",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
         "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
         "SampleAfterValue": "2000003",
@@ -542,8 +542,8 @@
     },
     {
         "BriefDescription": "Instructions retired from execution.",
-        "Counter": "Fixed counter 1",
-        "CounterHTOff": "Fixed counter 1",
+        "Counter": "Fixed counter 0",
+        "CounterHTOff": "Fixed counter 0",
         "EventName": "INST_RETIRED.ANY",
         "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
         "SampleAfterValue": "2000003",
@@ -599,7 +599,7 @@
         "UMask": "0x3"
     },
     {
-        "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+        "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
         "Counter": "0,1,2,3",
         "CounterHTOff": "0,1,2,3,4,5,6,7",
         "CounterMask": "1",
File diff suppressed because it is too large
@@ -1,48 +1,850 @@
 [
     {
-        "BriefDescription": "QPI clock ticks. Used to get percentages of QPI cycles events",
+        "BriefDescription": "Number of qfclks",
         "Counter": "0,1,2,3",
         "EventCode": "0x14",
         "EventName": "UNC_Q_CLOCKTICKS",
         "PerPkg": "1",
+        "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the 'GT/s' speed of the QPI link. For example, a 8GT/s link will have qfclk or 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.",
         "Unit": "QPI LL"
     },
     {
-        "BriefDescription": "Cycles where receiving QPI link is in half-width mode",
+        "BriefDescription": "Count of CTO Events",
         "Counter": "0,1,2,3",
-        "EventCode": "0x10",
-        "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
-        "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
-        "MetricName": "rxl0p_power_cycles %",
+        "EventCode": "0x38",
+        "EventName": "UNC_Q_CTO_COUNT",
+        "ExtSel": "1",
         "PerPkg": "1",
+        "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
         "Unit": "QPI LL"
     },
     {
-        "BriefDescription": "Cycles where transmitting QPI link is in half-width mode",
+        "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
         "Counter": "0,1,2,3",
-        "EventCode": "0xd",
-        "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
-        "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
-        "MetricName": "txl0p_power_cycles %",
+        "EventCode": "0x13",
+        "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
         "PerPkg": "1",
-        "Unit": "QPI LL"
-    },
-    {
-        "BriefDescription": "Number of data flits transmitted ",
-        "Counter": "0,1,2,3",
-        "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
-        "PerPkg": "1",
-        "ScaleUnit": "8Bytes",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
         "UMask": "0x2",
         "Unit": "QPI LL"
     },
     {
-        "BriefDescription": "Number of non data (control) flits transmitted ",
+        "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT",
         "Counter": "0,1,2,3",
+        "EventCode": "0x13",
+        "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
         "PerPkg": "1",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+        "UMask": "0x8",
         "Unit": "QPI LL"
     },
+    {
+        "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Not Set",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x13",
+        "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x13",
+        "EventName": "UNC_Q_DIRECT2CORE.SUCCESS",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles in L1",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x12",
+        "EventName": "UNC_Q_L1_POWER_CYCLES",
+        "PerPkg": "1",
+        "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles in L0p",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x10",
+        "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
+        "PerPkg": "1",
+        "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles in L0",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xf",
+        "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+        "PerPkg": "1",
+        "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Bypassed",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x9",
+        "EventName": "UNC_Q_RxL_BYPASSED",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "CRC Errors Detected; LinkInit",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+        "PerPkg": "1",
+        "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "CRC Errors Detected; Normal Operations",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+        "PerPkg": "1",
+        "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "VN0 Credit Consumed; DRS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1e",
+        "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "VN0 Credit Consumed; HOM",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1e",
+        "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+        "UMask": "0x8",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "VN0 Credit Consumed; NCB",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1e",
+        "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "VN0 Credit Consumed; NCS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1e",
+        "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "VN0 Credit Consumed; NDR",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1e",
+        "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+        "UMask": "0x20",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "VN0 Credit Consumed; SNP",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1e",
+        "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+        "UMask": "0x10",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "VNA Credit Consumed",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1d",
+        "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Cycles Not Empty",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xa",
+        "EventName": "UNC_Q_RxL_CYCLES_NE",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_Q_RxL_FLITS_G0.DATA",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1",
+        "EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x18",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x8",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x10",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 1; HOM Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x6",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 1; SNP Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0xc",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x8",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x10",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x3",
+        "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Allocations",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x8",
+        "EventName": "UNC_Q_RxL_INSERTS",
+        "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Allocations - DRS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x9",
+        "EventName": "UNC_Q_RxL_INSERTS_DRS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Allocations - HOM",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_Q_RxL_INSERTS_HOM",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Allocations - NCB",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xa",
+        "EventName": "UNC_Q_RxL_INSERTS_NCB",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Allocations - NCS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_Q_RxL_INSERTS_NCS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Allocations - NDR",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xe",
+        "EventName": "UNC_Q_RxL_INSERTS_NDR",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Rx Flit Buffer Allocations - SNP",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_Q_RxL_INSERTS_SNP",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Occupancy - All Packets",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xb",
+        "EventName": "UNC_Q_RxL_OCCUPANCY",
+        "PerPkg": "1",
+        "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Occupancy - DRS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x15",
+        "EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Occupancy - HOM",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x18",
+        "EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Occupancy - NCB",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x16",
+        "EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Occupancy - NCS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x17",
+        "EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Occupancy - NDR",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x1a",
+        "EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "RxQ Occupancy - SNP",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x19",
+        "EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
+        "ExtSel": "1",
+        "PerPkg": "1",
+        "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - HOM",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.BGF_DRS",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - DRS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.BGF_HOM",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x8",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - SNP",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.BGF_NCB",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NDR",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.BGF_NCS",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x4",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCS",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.BGF_NDR",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x20",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCB",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.BGF_SNP",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x10",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; Egress Credits",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.EGRESS_CREDITS",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x40",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Stalls Sending to R3QPI; GV",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x35",
+        "EventName": "UNC_Q_RxL_STALLS.GV",
+        "PerPkg": "1",
+        "PublicDescription": "Number of stalls trying to send to R3QPI.",
+        "UMask": "0x80",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles in L0p",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xd",
+        "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
+        "PerPkg": "1",
+        "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles in L0",
+        "Counter": "0,1,2,3",
+        "EventCode": "0xc",
+        "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+        "PerPkg": "1",
+        "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Tx Flit Buffer Bypassed",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x5",
+        "EventName": "UNC_Q_TxL_BYPASSED",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+        "PerPkg": "1",
+        "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+        "UMask": "0x2",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x2",
+        "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+        "PerPkg": "1",
+        "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+        "UMask": "0x1",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+        "Counter": "0,1,2,3",
+        "EventCode": "0x6",
+        "EventName": "UNC_Q_TxL_CYCLES_NE",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+        "Unit": "QPI LL"
+    },
+    {
+        "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
+        "Counter": "0,1,2,3",
+        "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+        "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
|
||||
"UMask": "0x2",
|
||||
"Unit": "QPI LL"
|
||||
},
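The description above walks through the flit/fit bandwidth arithmetic in prose. A minimal Python sketch of the same calculation, assuming hypothetical flit counts sampled over a fixed interval (the function names and example values are illustrative, not part of the event file):

```python
# Hedged sketch of the bandwidth arithmetic described above; the inputs are
# hypothetical samples of the UNC_Q_TxL_FLITS_G0.* counters over one interval.

def qpi_link_bandwidth_bytes_per_sec(total_flits: int, seconds: float) -> float:
    # Raw link bandwidth: each flit carries 80 bits, i.e. flits * 80b / time.
    return total_flits * 80 / 8 / seconds

def qpi_data_bandwidth_bytes_per_sec(data_flits: int, seconds: float,
                                     half_width: bool = False) -> float:
    # Data flits carry 8B of payload in L0, or 4B in L0p (half-width) mode.
    return data_flits * (4 if half_width else 8) / seconds

# Example: 1e9 data flits observed in 1s at full width -> 8e9 B/s of payload.
print(qpi_data_bandwidth_bytes_per_sec(1_000_000_000, 1.0))
```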
{
"BriefDescription": "Flits Transferred - Group 0; Idle and Null Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.IDLE",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
"UMask": "0x1",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
"ScaleUnit": "8Bytes",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
"UMask": "0x4",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x18",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x8",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x10",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x6",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x4",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x2",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 1; SNP Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.SNP",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x1",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0xc",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x4",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x8",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x10",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x1",
"Unit": "QPI LL"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x2",
"Unit": "QPI LL"
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_Q_TxL_INSERTS",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "QPI LL"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_Q_TxL_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
"Unit": "QPI LL"
},
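The two entries above are designed to be combined. A small sketch of the average-lifetime derivation that the UNC_Q_TxL_INSERTS description mentions, with hypothetical sampled counts (the function name is illustrative):

```python
# Little's law over the Tx flit buffer: UNC_Q_TxL_OCCUPANCY accumulates queue
# depth per cycle and UNC_Q_TxL_INSERTS counts allocations, so their ratio is
# the average number of cycles a flit spends in the TxQ.

def avg_txq_lifetime_cycles(txl_occupancy: int, txl_inserts: int) -> float:
    return txl_occupancy / txl_inserts if txl_inserts else 0.0
```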
{
"BriefDescription": "VNA Credits Returned",
"Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_Q_VNA_CREDIT_RETURNS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of VNA credits returned.",
"Unit": "QPI LL"
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of VNA credits in the Rx side that are waiting to be returned back across the link.",
"Unit": "QPI LL"
}
]
@ -1,82 +1,493 @@
[
{
"BriefDescription": "Memory page activates",
"BriefDescription": "DRAM Activate Count",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"Unit": "iMC"
},
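The ACT_COUNT description states its derivation in prose; expressed directly in Python, with hypothetical counter values (the helper names are not from the event file):

```python
# Per the description above: Activates not caused by a page-miss precharge.
def non_conflict_activates(act_count: int, pre_count_page_miss: int) -> int:
    return act_count - pre_count_page_miss

# A common derived metric (an assumption, not stated in this file): a CAS that
# required no new Activate hit an already-open page.
def page_hit_rate(cas_count_all: int, act_count: int) -> float:
    return 1.0 - act_count / cas_count_all
```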
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x3",
"Unit": "iMC"
},
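A hedged sketch of the usual channel-bandwidth derivation from these CAS counts, assuming 64 bytes transferred per CAS (one cacheline) -- a platform assumption, not something the event file itself states:

```python
def dram_channel_bandwidth_bytes_per_sec(cas_count: int, seconds: float) -> float:
    # One RD_CAS or WR_CAS moves a 64B cacheline (assumed).
    return cas_count * 64 / seconds

# e.g. read bandwidth from UNC_M_CAS_COUNT.RD samples; for write bandwidth,
# substitute UNC_M_CAS_COUNT.WR samples.
```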
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "Memory controller clock ticks. Used to get percentages of memory controller cycles events",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
"MetricName": "power_channel_ppd %",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles all ranks are in critical thermal throttle",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
"MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles Memory is in self refresh power mode",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
"MetricName": "power_self_refresh %",
"PerPkg": "1",
"Unit": "iMC"
},
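The three entries above carry MetricExpr strings of the form (EVENT / UNC_M_CLOCKTICKS) * 100. The same computation in plain Python, over hypothetical sampled counts:

```python
def imc_cycles_percent(event_cycles: int, clockticks: int) -> float:
    # Mirrors the MetricExpr: fraction of uncore clocks, as a percentage.
    return event_cycles / clockticks * 100.0

# e.g. "power_self_refresh %" = imc_cycles_percent(self_refresh_cycles, uclks)
```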
{
"BriefDescription": "Memory page conflicts",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Occupancy counter for memory read queue",
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "uclks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Uncore Fixed Counter - uclks",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge All Commands",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that the precharge all command was sent.",
"Unit": "iMC"
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
"PublicDescription": "Counts the number of refreshes issued.",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
"PublicDescription": "Counts the number of refreshes issued.",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "ECC Correctable Errors",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
"PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Channel DLLOFF Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
"PerPkg": "1",
"PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
"Unit": "iMC"
},
{
"BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"PerPkg": "1",
"PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x80",
"Unit": "iMC"
},
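The CKE descriptions above suggest programming the counter with Invert to count cycles in power-saving mode; arithmetically that is the complement against the uncore clock, sketched here with hypothetical counts (helper name is illustrative):

```python
def rank_power_saving_cycles(clockticks: int, cke_on_cycles: int) -> int:
    # Cycles the selected rank was NOT in CKE ON mode (the APD/PPDS/PPDF
    # power-down modes combined), per the Invert recommendation above.
    return clockticks - cke_on_cycles
```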
{
"BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M_RPQ_CYCLES_FULL",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar to the count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Not Empty",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
"Unit": "iMC"
},
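The RPQ entries above spell out both derivations in prose; combined into one hedged sketch over hypothetical sampled counts (the function names are illustrative):

```python
# Average queue depth while in use, and average read queuing latency via
# Little's law, from the three RPQ counters described above.

def rpq_avg_occupancy(rpq_occupancy: int, rpq_cycles_ne: int) -> float:
    return rpq_occupancy / rpq_cycles_ne if rpq_cycles_ne else 0.0

def rpq_avg_latency_cycles(rpq_occupancy: int, rpq_inserts: int) -> float:
    return rpq_occupancy / rpq_inserts if rpq_inserts else 0.0

# The WPQ counters below support the same derivations, subject to the
# posted-write caveat noted in the UNC_M_WPQ_OCCUPANCY description.
```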
{
"BriefDescription": "Write Pending Queue Full Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar to the count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Not Empty",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
"Unit": "iMC"
},
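The same derivation works on the write side, with the caveat from the description above: writes post early, so the ratio reflects queuing inside the iMC, not end-to-end write latency. A minimal sketch, again with an illustrative MetricName:

{
"BriefDescription": "Average Write Pending Queue occupancy over non-empty cycles (illustrative)",
"MetricExpr": "UNC_M_WPQ_OCCUPANCY / UNC_M_WPQ_CYCLES_NE",
"MetricName": "wpq_avg_occupancy"
}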
{
"BriefDescription": "Write Pending Queue CAM Match",
"Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue CAM Match",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
}
]
Diff between files not shown because of its large size.
@ -1,272 +1,372 @@
[
{
"BriefDescription": "PCU clock ticks. Used to get percentages of PCU cycles events",
"BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
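Because the pclk ticks at a fixed 800 MHz, the count converts directly to elapsed time. A minimal sketch of a derived metric, assuming only that fixed rate from the description above (the MetricName is illustrative):

{
"BriefDescription": "Measured wall time in seconds, derived from the fixed 800 MHz pclk (illustrative)",
"MetricExpr": "UNC_P_CLOCKTICKS / 800000000",
"MetricName": "pcu_measured_seconds"
}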
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_P_DEMOTIONS_CORE0",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_P_DEMOTIONS_CORE1",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_P_DEMOTIONS_CORE3",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_P_DEMOTIONS_CORE4",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_P_DEMOTIONS_CORE5",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_P_DEMOTIONS_CORE6",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_P_DEMOTIONS_CORE7",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_BAND0_CYCLES",
"MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band0_cycles %",
"Filter": "PCUFilter[7:0]",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
"Unit": "PCU"
},
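The removed descriptions nearby state that the band threshold is programmed in 100 MHz units, and the derived events later in this file pair filter_band0=12 with 1.2 GHz. Under that assumption, a hypothetical residency event for a 1.8 GHz threshold, in the same style (name and threshold are illustrative, not part of this update):

{
"BriefDescription": "Cycles the uncore ran at a frequency >= 1.8 GHz (illustrative)",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1800MHZ_CYCLES",
"Filter": "filter_band0=18",
"PerPkg": "1",
"Unit": "PCU"
}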
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_BAND1_CYCLES",
"MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band1_cycles %",
"Filter": "PCUFilter[15:8]",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_BAND2_CYCLES",
"MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band2_cycles %",
"Filter": "PCUFilter[23:16]",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_BAND3_CYCLES",
"MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band3_cycles %",
"Filter": "PCUFilter[31:24]",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
"Filter": "edge=1",
"MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band0_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
"Filter": "edge=1",
"MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band1_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
"Filter": "edge=1",
"MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band2_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100 MHz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
"Filter": "edge=1",
"MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_band3_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"Filter": "occ_sel=1",
"MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "power_state_occupancy.cores_c0 %",
"PerPkg": "1",
"Unit": "PCU"
},
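Since the occupancy count accumulates the number of resident cores every pclk, dividing by UNC_P_CLOCKTICKS yields the average core count directly; the existing "%" metric is the same ratio scaled by 100. A minimal sketch with an illustrative MetricName:

{
"BriefDescription": "Average number of cores resident in C0 (illustrative)",
"MetricExpr": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS",
"MetricName": "avg_cores_in_c0"
}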
{
"BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C3, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"Filter": "occ_sel=2",
"MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "power_state_occupancy.cores_c3 %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C6, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"Filter": "occ_sel=3",
"MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "power_state_occupancy.cores_c6 %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
"Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "prochot_external_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
"BriefDescription": "Current Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
"MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_max_current_cycles %",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when current is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
"BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs of the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop its frequency down. This is largely to minimize increases in snoop and remote read latencies.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
"Counter": "0,1,2,3",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
"MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_trans_cycles %",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is changing frequency. This cannot be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2 GHz. Derived from unc_p_freq_band0_cycles",
"BriefDescription": "Memory Phase Shedding Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C3",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C3, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C6",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C6, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "External Prochot",
"Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Internal Prochot",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that we are in internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Total Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
"Filter": "filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2 GHz. Derived from unc_p_freq_band1_cycles",
"BriefDescription": "Cycles Changing Voltage",
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
"Filter": "filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"EventCode": "0x3",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. This event is calculated by or'ing together the increasing and decreasing events.",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3 GHz. Derived from unc_p_freq_band2_cycles",
"BriefDescription": "Cycles Decreasing Voltage",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
"Filter": "filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"EventCode": "0x2",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4 GHz. Derived from unc_p_freq_band3_cycles",
"BriefDescription": "Cycles Increasing Voltage",
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
"Filter": "filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"EventCode": "0x1",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
"Unit": "PCU"
},
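As with UNC_P_FREQ_TRANS_CYCLES above, these counts are most meaningful as a fraction of pclk cycles. A minimal sketch in the style of the existing "%" metrics, with an illustrative name:

{
"BriefDescription": "Percentage of cycles spent changing voltage (illustrative)",
"MetricExpr": "(UNC_P_VOLT_TRANS_CYCLES_CHANGE / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "volt_trans_cycles_change %"
}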
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2 GHz. Derived from unc_p_freq_band0_cycles",
"BriefDescription": "VR Hot",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2 GHz. Derived from unc_p_freq_band1_cycles",
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 3 GHz. Derived from unc_p_freq_band2_cycles",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 4 GHz. Derived from unc_p_freq_band3_cycles",
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"EventCode": "0x32",
"EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
@ -14,7 +14,7 @@ GenuineIntel-6-(7D|7E|A7),v1.14,icelake,core
GenuineIntel-6-6[AC],v1.15,icelakex,core
GenuineIntel-6-3A,v22,ivybridge,core
GenuineIntel-6-3E,v21,ivytown,core
GenuineIntel-6-2D,v20,jaketown,core
GenuineIntel-6-2D,v21,jaketown,core
GenuineIntel-6-57,v9,knightslanding,core
GenuineIntel-6-85,v9,knightslanding,core
GenuineIntel-6-1E,v2,nehalemep,core