Diffstat (limited to 'lib')
188 files changed, 19827 insertions, 1874 deletions
diff --git a/lib/Makefile b/lib/Makefile index 216ba1d58473..c3a95f00d4ef 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -73,7 +73,6 @@ SUBDIR= ${SUBDIR_BOOTSTRAP} \ liblzma \ libmemstat \ libmd \ - libmixer \ libmt \ lib80211 \ libnetbsd \ @@ -176,7 +175,6 @@ SUBDIR+= clang .endif SUBDIR.${MK_CUSE}+= libcuse -SUBDIR.${MK_CUSE}+= virtual_oss SUBDIR.${MK_TOOLCHAIN}+=libpe SUBDIR.${MK_DIALOG}+= libdpv libfigpar SUBDIR.${MK_FDT}+= libfdt @@ -238,6 +236,8 @@ SUBDIR.${MK_PMC}+= libpmc libpmcstat SUBDIR.${MK_RADIUS_SUPPORT}+= libradius SUBDIR.${MK_SENDMAIL}+= libmilter libsm libsmdb libsmutil SUBDIR.${MK_TELNET}+= libtelnet +SUBDIR.${MK_SOUND}+= libmixer +SUBDIR.${MK_CUSE}.${MK_SOUND}+= virtual_oss SUBDIR.${MK_TESTS_SUPPORT}+= atf SUBDIR.${MK_TESTS_SUPPORT}+= liblutok SUBDIR.${MK_TESTS}+= tests diff --git a/lib/clang/libclang/Makefile b/lib/clang/libclang/Makefile index 56f6fd374fe5..1b24d5cbf4cf 100644 --- a/lib/clang/libclang/Makefile +++ b/lib/clang/libclang/Makefile @@ -14,6 +14,10 @@ PRIVATELIB= # Work around "relocation R_PPC_GOT16 out of range" errors PICFLAG= -fPIC .endif +.if ${MACHINE_CPUARCH} == "aarch64" +# Only required for GCC +LIBADD+= compiler_rt +.endif .endif SHARED_CXXFLAGS+= -UPIC # To avoid compile errors diff --git a/lib/clang/liblldb/Makefile b/lib/clang/liblldb/Makefile index aa9e90b2f6f2..b2da21c2990e 100644 --- a/lib/clang/liblldb/Makefile +++ b/lib/clang/liblldb/Makefile @@ -10,6 +10,10 @@ PACKAGE= lldb SHLIB_CXX= lldb SHLIB_MAJOR= 19 PRIVATELIB= +.if ${MACHINE_CPUARCH} == "aarch64" +# Only required for GCC +LIBADD+= compiler_rt +.endif .endif SHARED_CXXFLAGS+= -UPIC # To avoid compile errors diff --git a/lib/clang/libllvm/Makefile b/lib/clang/libllvm/Makefile index 05128550827a..9b2a272d2c67 100644 --- a/lib/clang/libllvm/Makefile +++ b/lib/clang/libllvm/Makefile @@ -14,6 +14,10 @@ PRIVATELIB= # Work around "relocation R_PPC_GOT16 out of range" errors PICFLAG= -fPIC .endif +.if ${MACHINE_CPUARCH} == "aarch64" +# Only required for GCC +LIBADD+= compiler_rt +.endif CFLAGS+= -DLLVM_BUILD_LLVM_DYLIB .endif diff --git a/lib/csu/common/notes.h b/lib/csu/common/notes.h index fc4e4d082c21..8f35e0eb2491 100644 --- a/lib/csu/common/notes.h +++ b/lib/csu/common/notes.h @@ -27,6 +27,4 @@ #define NOTE_FREEBSD_VENDOR "FreeBSD" -#define NOTE_SECTION ".note.tag" - #endif diff --git a/lib/libarchive/tests/functional_test.sh b/lib/libarchive/tests/functional_test.sh index 191eae65f4dc..db3845b5e6f7 100755 --- a/lib/libarchive/tests/functional_test.sh +++ b/lib/libarchive/tests/functional_test.sh @@ -40,7 +40,7 @@ atf_init_test_cases() # Redirect stderr to stdout for the usage message because if you don't # kyua list/kyua test will break: # https://github.com/jmmv/kyua/issues/149 - testcases=$(${TESTER} -h 2>&1 | awk 'p != 0 && $1 ~ /^[0-9]+:/ { print $NF } /Available tests:/ { p=1 }') + testcases=$(${TESTER} -l 2>&1 | awk '/^ [0-9]+: / { print $2 }') for testcase in ${testcases}; do atf_test_case ${testcase} eval "${testcase}_body() { check ${testcase}; }" diff --git a/lib/libbe/be.c b/lib/libbe/be.c index 4a7c2e43b2c1..2147435ebf9b 100644 --- a/lib/libbe/be.c +++ b/lib/libbe/be.c @@ -891,6 +891,54 @@ be_create_from_existing(libbe_handle_t *lbh, const char *bename, const char *old return (set_error(lbh, err)); } +/* + * Create a zfs dataset and map the return to libbe error. 
+ */ +static int +be_zfs_create(libzfs_handle_t *lzh, const char *buf, zfs_type_t t, + nvlist_t *props) +{ + int err; + + if ((err = zfs_create(lzh, buf, t, props)) != 0) { + switch (err) { + case EZFS_EXISTS: + return (BE_ERR_EXISTS); + case EZFS_NOENT: + return (BE_ERR_NOENT); + case EZFS_BADTYPE: + case EZFS_BADVERSION: + return (BE_ERR_NOPOOL); + case EZFS_BADPROP: + default: + /* We set something up wrong, probably... */ + return (BE_ERR_UNKNOWN); + } + } + + return (BE_ERR_SUCCESS); +} + + +/* + * Create an empty boot environment. + */ +int +be_create_empty(libbe_handle_t *lbh, const char *bename) +{ + char buf[BE_MAXPATHLEN]; + int err; + + if ((err = be_validate_name(lbh, bename)) != 0) + return (set_error(lbh, err)); + + if ((err = be_root_concat(lbh, bename, buf)) != 0) + return (set_error(lbh, err)); + + err = be_zfs_create(lbh->lzh, buf, ZFS_TYPE_FILESYSTEM, NULL); + return (set_error(lbh, err)); +} + /* * Verifies that a snapshot has a valid name, exists, and has a mountpoint of @@ -1118,21 +1166,9 @@ be_create_child_noent(libbe_handle_t *lbh, const char *active, nvlist_add_string(props, "mountpoint", child_path); /* Create */ - if ((err = zfs_create(lbh->lzh, active, ZFS_TYPE_DATASET, + if ((err = be_zfs_create(lbh->lzh, active, ZFS_TYPE_DATASET, props)) != 0) { - switch (err) { - case EZFS_EXISTS: - return (set_error(lbh, BE_ERR_EXISTS)); - case EZFS_NOENT: - return (set_error(lbh, BE_ERR_NOENT)); - case EZFS_BADTYPE: - case EZFS_BADVERSION: - return (set_error(lbh, BE_ERR_NOPOOL)); - case EZFS_BADPROP: - default: - /* We set something up wrong, probably... */ - return (set_error(lbh, BE_ERR_UNKNOWN)); - } + return (set_error(lbh, err)); } nvlist_free(props); diff --git a/lib/libbe/be.h b/lib/libbe/be.h index d3f47c0604fe..18be067c4410 100644 --- a/lib/libbe/be.h +++ b/lib/libbe/be.h @@ -65,6 +65,7 @@ bool be_is_auto_snapshot_name(libbe_handle_t *, const char *); /* Bootenv creation functions */ int be_create(libbe_handle_t *, const char *); int be_create_depth(libbe_handle_t *, const char *, const char *, int); +int be_create_empty(libbe_handle_t *, const char *); int be_create_from_existing(libbe_handle_t *, const char *, const char *); int be_create_from_existing_snap(libbe_handle_t *, const char *, const char *); int be_snapshot(libbe_handle_t *, const char *, const char *, bool, char *); diff --git a/lib/libbe/libbe.3 b/lib/libbe/libbe.3 index 4331713e9227..f65da5ef1090 100644 --- a/lib/libbe/libbe.3 +++ b/lib/libbe/libbe.3 @@ -25,7 +25,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd December 11, 2025 +.Dd February 4, 2026 .Dt LIBBE 3 .Os .Sh NAME @@ -69,6 +69,9 @@ .Fn be_create_depth "libbe_handle_t *hdl" "const char *be_name" "const char *snap" "int depth" .Pp .Ft int +.Fn be_create_empty "libbe_handle_t *hdl" "const char *be_name" +.Pp +.Ft int .Fn be_create_from_existing "libbe_handle_t *hdl" "const char *be_name" "const char *be_origin" .Pp .Ft int @@ -282,6 +285,10 @@ A depth of '0' is no recursion and '-1' is unlimited (i.e., a recursive boot environment). .Pp The +.Fn be_create_empty +function creates an empty boot environment with the given name. +.Pp +The .Fn be_create_from_existing function creates a boot environment with the given name from the name of an existing boot environment. 
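The libbe.3 hunk above documents the new be_create_empty() call; the following is a minimal usage sketch, not part of this commit, assuming a boot environment root of "zroot/ROOT" and a new BE name "scratch" chosen purely for illustration:

/*
 * Usage sketch only: the BE root "zroot/ROOT" and the name "scratch"
 * are assumptions, not taken from the commit above.  Link with -lbe.
 */
#include <stdbool.h>
#include <stdio.h>

#include <be.h>

int
main(void)
{
	libbe_handle_t *lbh;
	int err;

	if ((lbh = libbe_init("zroot/ROOT")) == NULL)
		return (1);
	libbe_print_on_error(lbh, true);

	/* Creates the empty dataset zroot/ROOT/scratch. */
	err = be_create_empty(lbh, "scratch");
	if (err != BE_ERR_SUCCESS)
		fprintf(stderr, "be_create_empty: error %d\n", err);

	libbe_close(lbh);
	return (err == BE_ERR_SUCCESS ? 0 : 1);
}

As the be.c hunk above shows, the dataset is created directly with be_zfs_create() rather than cloned from an existing snapshot, so the new boot environment starts with no contents.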
diff --git a/lib/libbe/tests/Makefile b/lib/libbe/tests/Makefile index dfe49bd7f3e5..57e33b75aabc 100644 --- a/lib/libbe/tests/Makefile +++ b/lib/libbe/tests/Makefile @@ -20,6 +20,6 @@ CFLAGS+= -I${ZFSTOP}/lib/libspl/include/os/freebsd CFLAGS+= -I${SRCTOP}/sys CFLAGS+= -I${SRCTOP}/cddl/compat/opensolaris/include CFLAGS+= -include ${ZFSTOP}/include/os/freebsd/spl/sys/ccompile.h -CFLAGS+= -DHAVE_ISSETUGID +CFLAGS+= -DHAVE_ISSETUGID -DHAVE_STRLCAT -DHAVE_STRLCPY .include <bsd.test.mk> diff --git a/lib/libbe/tests/be_create.sh b/lib/libbe/tests/be_create.sh index e1342368995d..e39ce1a0ccb4 100755 --- a/lib/libbe/tests/be_create.sh +++ b/lib/libbe/tests/be_create.sh @@ -174,6 +174,15 @@ libbe_create_body() # the child dataset should exist atf_check -o not-empty \ zfs list "${zpool}/ROOT/relative-snap/usr" + + # test empty BE creation. + atf_check $prog "${zpool}/ROOT" \ + empty \ + ignored \ + 0 + # the dataset should exist + atf_check -o not-empty \ + zfs list "${zpool}/ROOT/empty" } libbe_create_cleanup() diff --git a/lib/libbe/tests/target_prog.c b/lib/libbe/tests/target_prog.c index 4c79d1a30714..83ef68ab2890 100644 --- a/lib/libbe/tests/target_prog.c +++ b/lib/libbe/tests/target_prog.c @@ -27,6 +27,7 @@ #include <sys/cdefs.h> #include <be.h> +#include <string.h> /* * argv[1] = root boot environment (e.g. zroot/ROOT), @@ -36,15 +37,17 @@ */ int main(int argc, char *argv[]) { - libbe_handle_t *lbh; + libbe_handle_t *lbh; if (argc != 5) - return -1; + return (-1); - if ((lbh = libbe_init(argv[1])) == NULL) - return -1; + if ((lbh = libbe_init(argv[1])) == NULL) + return (-1); libbe_print_on_error(lbh, true); + if (strcmp(argv[2], "empty") == 0) + return (be_create_empty(lbh, argv[2])); return (be_create_depth(lbh, argv[2], argv[3], atoi(argv[4]))); } diff --git a/lib/libc/Makefile b/lib/libc/Makefile index 34474cfa9fe4..56818e07aa96 100644 --- a/lib/libc/Makefile +++ b/lib/libc/Makefile @@ -142,9 +142,6 @@ CFLAGS+= -DYP .if ${MK_HESIOD} != "no" CFLAGS+= -DHESIOD .endif -.if ${MK_FP_LIBC} == "no" -CFLAGS+= -DNO_FLOATING_POINT -.endif .if ${MK_NS_CACHING} != "no" CFLAGS+= -DNS_CACHING .endif diff --git a/lib/libc/aarch64/gen/makecontext.c b/lib/libc/aarch64/gen/makecontext.c index 8c9b4edd7a15..2d0ee1043a9b 100644 --- a/lib/libc/aarch64/gen/makecontext.c +++ b/lib/libc/aarch64/gen/makecontext.c @@ -74,7 +74,8 @@ __makecontext(ucontext_t *ucp, void (*func)(void), int argc, ...) va_end(ap); /* Set the stack */ - gp->gp_sp = STACKALIGN(ucp->uc_stack.ss_sp + ucp->uc_stack.ss_size); + gp->gp_sp = STACKALIGN((uintptr_t)ucp->uc_stack.ss_sp + + ucp->uc_stack.ss_size); /* Arrange for return via the trampoline code. */ gp->gp_elr = (__register_t)_ctx_start; gp->gp_x[19] = (__register_t)func; diff --git a/lib/libc/aarch64/string/Makefile.inc b/lib/libc/aarch64/string/Makefile.inc index 35523fb954be..bc05e849aa20 100644 --- a/lib/libc/aarch64/string/Makefile.inc +++ b/lib/libc/aarch64/string/Makefile.inc @@ -5,10 +5,8 @@ AARCH64_STRING_FUNCS= \ memcmp \ - memcpy \ memmove \ memrchr \ - memset \ stpcpy \ strchr \ strchrnul \ @@ -16,6 +14,11 @@ AARCH64_STRING_FUNCS= \ strnlen \ strrchr +AARCH64_STRING_IFUNC_FILES= \ + memcpy-mops.S \ + memmove-mops.S \ + memset-mops.S + # SIMD-enhanced routines not derived from Arm's code MDSRCS+= \ memchr.S \ @@ -34,7 +37,13 @@ MDSRCS+= \ timingsafe_bcmp.S \ timingsafe_memcmp.S \ bcopy.c \ - bzero.c + bzero.c \ + memcpy.S \ + memcpy_resolver.c \ + memmove_resolver.c \ + memset.S \ + memset_resolver.c \ + memset_zva64.S # # Add the above functions. 
Generate an asm file that includes the needed @@ -43,8 +52,8 @@ MDSRCS+= \ # override the generated file in these cases. # .for FUNC in ${AARCH64_STRING_FUNCS} -.if !exists(${FUNC}.S) -${FUNC}.S: +.if !exists(${LIBC_SRCTOP}/aarch64/string/${FUNC}.S) +${FUNC}.S: ${LIBC_SRCTOP}/aarch64/string/Makefile.inc printf '/* %sgenerated by libc/aarch64/string/Makefile.inc */\n' @ > ${.TARGET} printf '#define __%s_aarch64 %s\n' ${FUNC} ${FUNC} >> ${.TARGET} printf '#include "aarch64/%s.S"\n' ${FUNC} >> ${.TARGET} @@ -55,6 +64,18 @@ MDSRCS+= ${FUNC}.S CFLAGS.${FUNC}.S+=-I${SRCTOP}/contrib/arm-optimized-routines/string .endfor -# memchr.S is a wrapper in the src tree for the implementation from +.for FILE in ${AARCH64_STRING_IFUNC_FILES} +${FILE}: ${LIBC_SRCTOP}/aarch64/string/Makefile.inc + printf '/* %sgenerated by libc/aarch64/string/Makefile.inc */\n' @ > ${.TARGET} + printf '#include "aarch64/%s"\n' ${FILE} >> ${.TARGET} +CLEANFILES+= ${FILE} +MDSRCS+= ${FILE} +CFLAGS.${FILE}+=-I${SRCTOP}/contrib/arm-optimized-routines/string +.endfor + +# Several files are wrappers in the src tree for the implementation from # arm-optimized-routines CFLAGS.memchr.S+=-I${SRCTOP}/contrib/arm-optimized-routines/string +CFLAGS.memcpy.S+=-I${SRCTOP}/contrib/arm-optimized-routines/string +CFLAGS.memset.S+=-I${SRCTOP}/contrib/arm-optimized-routines/string +CFLAGS.memset_zva64.S+=-I${SRCTOP}/contrib/arm-optimized-routines/string diff --git a/lib/libc/aarch64/string/memcpy.S b/lib/libc/aarch64/string/memcpy.S index 53e860750eb2..c4601d158d6b 100644 --- a/lib/libc/aarch64/string/memcpy.S +++ b/lib/libc/aarch64/string/memcpy.S @@ -1,3 +1 @@ -#define __memcpy_aarch64_simd memcpy -#define __memmove_aarch64_simd memmove #include "aarch64/memcpy-advsimd.S" diff --git a/lib/libc/powerpcspe/gen/fabs.S b/lib/libc/aarch64/string/memcpy_resolver.c index df9196c3273d..c2a7477a939e 100644 --- a/lib/libc/powerpcspe/gen/fabs.S +++ b/lib/libc/aarch64/string/memcpy_resolver.c @@ -1,6 +1,7 @@ -/* - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2026 Arm Ltd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,15 +24,19 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ +#include <sys/types.h> +#include <machine/ifunc.h> -#include <machine/asm.h> -/* - * double fabs(double) - */ -ENTRY(fabs) - /* arg is split in two words, clear sign bit only, in r3. */ - clrlwi %r3,%r3,1 - blr -END(fabs) +#include <elf.h> + +void *__memcpy_aarch64_simd(void *, const void *, size_t); +void *__memcpy_aarch64_mops(void *, const void *, size_t); + +DEFINE_UIFUNC(, void *, memcpy, (void *, const void *, size_t)) +{ + if (ifunc_arg->_hwcap2 & HWCAP2_MOPS) + return (__memcpy_aarch64_mops); + + return (__memcpy_aarch64_simd); +} - .section .note.GNU-stack,"",%progbits diff --git a/lib/libc/powerpcspe/_fpmath.h b/lib/libc/aarch64/string/memmove_resolver.c index 9bc7450aacaf..95cf3deca966 100644 --- a/lib/libc/powerpcspe/_fpmath.h +++ b/lib/libc/aarch64/string/memmove_resolver.c @@ -1,8 +1,7 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * - * Copyright (c) 2003 David Schultz <das@FreeBSD.ORG> - * All rights reserved. 
+ * Copyright (c) 2026 Arm Ltd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -25,32 +24,19 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ +#include <sys/types.h> +#include <machine/ifunc.h> -union IEEEl2bits { - long double e; - struct { -#if _BYTE_ORDER == _LITTLE_ENDIAN - unsigned int manl :32; - unsigned int manh :20; - unsigned int exp :11; - unsigned int sign :1; -#else /* _BYTE_ORDER == _LITTLE_ENDIAN */ - unsigned int sign :1; - unsigned int exp :11; - unsigned int manh :20; - unsigned int manl :32; -#endif - } bits; -}; +#include <elf.h> -#define mask_nbit_l(u) ((void)0) -#define LDBL_IMPLICIT_NBIT -#define LDBL_NBIT 0 +void *__memmove_aarch64_simd(void *, const void *, size_t); +void *__memmove_aarch64_mops(void *, const void *, size_t); -#define LDBL_MANH_SIZE 20 -#define LDBL_MANL_SIZE 32 +DEFINE_UIFUNC(, void *, memmove, (void *, const void *, size_t)) +{ + if (ifunc_arg->_hwcap2 & HWCAP2_MOPS) + return (__memmove_aarch64_mops); + + return (__memmove_aarch64_simd); +} -#define LDBL_TO_ARRAY32(u, a) do { \ - (a)[0] = (uint32_t)(u).bits.manl; \ - (a)[1] = (uint32_t)(u).bits.manh; \ -} while(0) diff --git a/lib/libc/aarch64/string/memset.S b/lib/libc/aarch64/string/memset.S new file mode 100644 index 000000000000..acf707cdb7ec --- /dev/null +++ b/lib/libc/aarch64/string/memset.S @@ -0,0 +1 @@ +#include "aarch64/memset.S" diff --git a/lib/libc/aarch64/string/memset_resolver.c b/lib/libc/aarch64/string/memset_resolver.c new file mode 100644 index 000000000000..277e8c177de9 --- /dev/null +++ b/lib/libc/aarch64/string/memset_resolver.c @@ -0,0 +1,55 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2026 Arm Ltd + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#include <sys/types.h> + +#include <machine/armreg.h> +#include <machine/ifunc.h> + +#include <elf.h> + +void *__memset_aarch64(void *, int, size_t); +void *__memset_aarch64_zva64(void *, int, size_t); +void *__memset_aarch64_mops(void *, int, size_t); + +DEFINE_UIFUNC(, void *, memset, (void *, int, size_t)) +{ + uint64_t dczid; + + if (ifunc_arg->_hwcap2 & HWCAP2_MOPS) + return (__memset_aarch64_mops); + + /* + * Check for the DC ZVA instruction, and it will + * zero 64 bytes (4 * 4byte words). + */ + dczid = READ_SPECIALREG(dczid_el0); + if ((dczid & DCZID_DZP) == 0 && DCZID_BS_SIZE(dczid) == 4) + return (__memset_aarch64_zva64); + + return (__memset_aarch64); +} + diff --git a/lib/libc/aarch64/string/memset_zva64.S b/lib/libc/aarch64/string/memset_zva64.S new file mode 100644 index 000000000000..7f1cf6ba577a --- /dev/null +++ b/lib/libc/aarch64/string/memset_zva64.S @@ -0,0 +1,4 @@ +/* Used when we know we have a 64-byte dc zva instruction */ +#define __memset_aarch64 __memset_aarch64_zva64 +#define SKIP_ZVA_CHECK +#include "aarch64/memset.S" diff --git a/lib/libc/arm/aeabi/aeabi_unwind_cpp.c b/lib/libc/arm/aeabi/aeabi_unwind_cpp.c index efcace2c0675..3667b7d7e673 100644 --- a/lib/libc/arm/aeabi/aeabi_unwind_cpp.c +++ b/lib/libc/arm/aeabi/aeabi_unwind_cpp.c @@ -27,15 +27,15 @@ * */ +#include <sys/param.h> + /* * Provide an implementation of __aeabi_unwind_cpp_pr{0,1,2}. These are * required by libc but are implemented in libgcc_eh.a which we don't link - * against. The libgcc_eh.a version will be called so we call abort to + * against. The libgcc_eh.a version will be called so we trap to * check this. */ -#include <stdlib.h> - void __aeabi_unwind_cpp_pr0(void) __hidden; void __aeabi_unwind_cpp_pr1(void) __hidden; void __aeabi_unwind_cpp_pr2(void) __hidden; @@ -43,18 +43,18 @@ void __aeabi_unwind_cpp_pr2(void) __hidden; void __aeabi_unwind_cpp_pr0(void) { - abort(); + __builtin_trap(); } void __aeabi_unwind_cpp_pr1(void) { - abort(); + __builtin_trap(); } void __aeabi_unwind_cpp_pr2(void) { - abort(); + __builtin_trap(); } diff --git a/lib/libc/csu/aarch64/reloc.c b/lib/libc/csu/aarch64/reloc.c index 4ba7920bcb07..77e8a38176ce 100644 --- a/lib/libc/csu/aarch64/reloc.c +++ b/lib/libc/csu/aarch64/reloc.c @@ -25,17 +25,42 @@ */ #include <sys/cdefs.h> +#include <machine/ifunc.h> + +static __ifunc_arg_t ifunc_arg; static void -ifunc_init(const Elf_Auxinfo *aux __unused) +ifunc_init(const Elf_Auxinfo *aux) { + ifunc_arg._size = sizeof(ifunc_arg); + ifunc_arg._hwcap = 0; + ifunc_arg._hwcap2 = 0; + ifunc_arg._hwcap3 = 0; + ifunc_arg._hwcap4 = 0; + + for (; aux->a_type != AT_NULL; aux++) { + switch (aux->a_type) { + case AT_HWCAP: + ifunc_arg._hwcap = aux->a_un.a_val | _IFUNC_ARG_HWCAP; + break; + case AT_HWCAP2: + ifunc_arg._hwcap2 = aux->a_un.a_val; + break; + case AT_HWCAP3: + ifunc_arg._hwcap3 = aux->a_un.a_val; + break; + case AT_HWCAP4: + ifunc_arg._hwcap4 = aux->a_un.a_val; + break; + } + } } static void crt1_handle_rela(const Elf_Rela *r) { typedef Elf_Addr (*ifunc_resolver_t)( - uint64_t, uint64_t, uint64_t, uint64_t, + uint64_t, const __ifunc_arg_t *, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); Elf_Addr *ptr, *where, target; @@ -43,7 +68,7 @@ crt1_handle_rela(const Elf_Rela *r) case R_AARCH64_IRELATIVE: ptr = (Elf_Addr *)r->r_addend; where = (Elf_Addr *)r->r_offset; - target = ((ifunc_resolver_t)ptr)(0, 0, 0, 0, 0, 0, 0, 0); + target = ((ifunc_resolver_t)ptr)(ifunc_arg._hwcap, &ifunc_arg, 0, 0, 0, 0, 0, 0); *where = target; break; } diff --git 
a/lib/libc/csu/powerpcspe/Makefile.inc b/lib/libc/csu/powerpcspe/Makefile.inc deleted file mode 100644 index ddead75f874d..000000000000 --- a/lib/libc/csu/powerpcspe/Makefile.inc +++ /dev/null @@ -1,3 +0,0 @@ -# - -CFLAGS+= -DCRT_IRELOC_SUPPRESS diff --git a/lib/libc/db/btree/bt_seq.c b/lib/libc/db/btree/bt_seq.c index 2562724faf33..fc7fa693b747 100644 --- a/lib/libc/db/btree/bt_seq.c +++ b/lib/libc/db/btree/bt_seq.c @@ -325,7 +325,7 @@ usecurrent: F_CLR(c, CURS_AFTER | CURS_BEFORE); static int __bt_first(BTREE *t, const DBT *key, EPG *erval, int *exactp) { - PAGE *h; + PAGE *h, *hprev; EPG *ep, save; pgno_t pg; @@ -338,7 +338,7 @@ __bt_first(BTREE *t, const DBT *key, EPG *erval, int *exactp) * page) and return it. */ if ((ep = __bt_search(t, key, exactp)) == NULL) - return (0); + return (RET_SPECIAL); if (*exactp) { if (F_ISSET(t, B_NODUPS)) { *erval = *ep; @@ -369,14 +369,14 @@ __bt_first(BTREE *t, const DBT *key, EPG *erval, int *exactp) break; if (h->pgno != save.page->pgno) mpool_put(t->bt_mp, h, 0); - if ((h = mpool_get(t->bt_mp, + if ((hprev = mpool_get(t->bt_mp, h->prevpg, 0)) == NULL) { if (h->pgno == save.page->pgno) mpool_put(t->bt_mp, save.page, 0); return (RET_ERROR); } - ep->page = h; + ep->page = h = hprev; ep->index = NEXTINDEX(h); } --ep->index; diff --git a/lib/libc/db/btree/bt_split.c b/lib/libc/db/btree/bt_split.c index 5fbf8c3ac0f3..865543928c35 100644 --- a/lib/libc/db/btree/bt_split.c +++ b/lib/libc/db/btree/bt_split.c @@ -542,7 +542,7 @@ bt_broot(BTREE *t, PAGE *h, PAGE *l, PAGE *r) * If the key is on an overflow page, mark the overflow chain * so it isn't deleted when the leaf copy of the key is deleted. */ - if (bl->flags & P_BIGKEY) { + if (bl->flags & P_BIGKEY) { pgno_t pgno; memcpy(&pgno, bl->bytes, sizeof(pgno)); if (bt_preserve(t, pgno) == RET_ERROR) diff --git a/lib/libc/gen/Makefile.inc b/lib/libc/gen/Makefile.inc index 4d064d18d36e..28e55f58ccf3 100644 --- a/lib/libc/gen/Makefile.inc +++ b/lib/libc/gen/Makefile.inc @@ -274,8 +274,10 @@ MAN+= alarm.3 \ posix_spawn.3 \ posix_spawn_file_actions_addopen.3 \ posix_spawn_file_actions_init.3 \ + posix_spawnattr_getexecfd_np.3 \ posix_spawnattr_getflags.3 \ posix_spawnattr_getpgroup.3 \ + posix_spawnattr_getprocdescp_np.3 \ posix_spawnattr_getschedparam.3 \ posix_spawnattr_getschedpolicy.3 \ posix_spawnattr_init.3 \ @@ -321,6 +323,7 @@ MAN+= alarm.3 \ ttyname.3 \ ualarm.3 \ ucontext.3 \ + uexterr_gettext.3 \ ulimit.3 \ uname.3 \ unvis.3 \ @@ -469,7 +472,9 @@ MLINKS+=posix_spawn.3 posix_spawnp.3 \ posix_spawn_file_actions_addopen.3 posix_spawn_file_actions_addfchdir_np.3 \ posix_spawn_file_actions_init.3 posix_spawn_file_actions_destroy.3 \ posix_spawnattr_getflags.3 posix_spawnattr_setflags.3 \ + posix_spawnattr_getexecfd_np.3 posix_spawnattr_setexecfd_np.3 \ posix_spawnattr_getpgroup.3 posix_spawnattr_setpgroup.3 \ + posix_spawnattr_getprocdescp_np.3 posix_spawnattr_setprocdescp_np.3 \ posix_spawnattr_getschedparam.3 posix_spawnattr_setschedparam.3 \ posix_spawnattr_getschedpolicy.3 posix_spawnattr_setschedpolicy.3 \ posix_spawnattr_getsigdefault.3 posix_spawnattr_setsigdefault.3 \ @@ -583,8 +588,8 @@ install-passwd: .PHONY ${PWD_MKDB_CMD} -i -p -d ${DESTDIR}/etc ${DESTDIR}/etc/master.passwd .if defined(NO_ROOT) && defined(METALOG) ( \ - echo ".${DISTBASE}/etc/pwd.db type=file mode=0644 uname=root gname=wheel"; \ - echo ".${DISTBASE}/etc/spwd.db type=file mode=0600 uname=root gname=wheel"; \ - echo ".${DISTBASE}/etc/passwd type=file mode=0644 uname=root gname=wheel"; \ + echo ".${DISTBASE}/etc/pwd.db type=file 
uname=root gname=wheel mode=0644"; \ + echo ".${DISTBASE}/etc/spwd.db type=file uname=root gname=wheel mode=0600"; \ + echo ".${DISTBASE}/etc/passwd type=file uname=root gname=wheel mode=0644"; \ ) | cat -l >> ${METALOG} .endif diff --git a/lib/libc/gen/Symbol.map b/lib/libc/gen/Symbol.map index 494b65bc5cc1..ddbd0522e13f 100644 --- a/lib/libc/gen/Symbol.map +++ b/lib/libc/gen/Symbol.map @@ -474,6 +474,13 @@ FBSD_1.8 { str2sig; }; +FBSD_1.9 { + posix_spawnattr_getexecfd_np; + posix_spawnattr_getprocdescp_np; + posix_spawnattr_setexecfd_np; + posix_spawnattr_setprocdescp_np; +}; + FBSDprivate_1.0 { /* needed by thread libraries */ __thr_jtable; diff --git a/lib/libc/gen/closedir.c b/lib/libc/gen/closedir.c index 6015114d6c47..73070d171cd1 100644 --- a/lib/libc/gen/closedir.c +++ b/lib/libc/gen/closedir.c @@ -68,6 +68,5 @@ fdclosedir(DIR *dirp) int closedir(DIR *dirp) { - return (_close(fdclosedir(dirp))); } diff --git a/lib/libc/gen/directory.3 b/lib/libc/gen/directory.3 index 263dfdd6eb95..270abb2d2371 100644 --- a/lib/libc/gen/directory.3 +++ b/lib/libc/gen/directory.3 @@ -25,7 +25,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd August 1, 2020 +.Dd January 31, 2026 .Dt DIRECTORY 3 .Os .Sh NAME @@ -86,13 +86,6 @@ and returns a pointer to be used to identify the .Em directory stream in subsequent operations. -The pointer -.Dv NULL -is returned if -.Fa filename -cannot be accessed, or if it cannot -.Xr malloc 3 -enough memory to hold the whole thing. .Pp The .Fn fdopendir @@ -134,14 +127,6 @@ or .Fn closedir on the same .Em directory stream . -The function returns -.Dv NULL -upon reaching the end of the directory or on error. -In the event of an error, -.Va errno -may be set to any of the values documented for the -.Xr getdirentries 2 -system call. .Pp The .Fn readdir_r @@ -166,10 +151,6 @@ upon reaching the end of the directory .Fa result is set to .Dv NULL . -The -.Fn readdir_r -function -returns 0 on success or an error number to indicate failure. .Pp The .Fn telldir @@ -179,12 +160,9 @@ returns a token representing the current location associated with the named Values returned by .Fn telldir are good only for the lifetime of the -.Dv DIR -pointer, -.Fa dirp , +.Em directory stream from which they are derived. -If the directory is closed and then -reopened, prior values returned by +If the directory is closed and then reopened, prior values returned by .Fn telldir will no longer be valid. Values returned by @@ -217,28 +195,22 @@ The function closes the named .Em directory stream -and frees the structure associated with the -.Fa dirp -pointer, -returning 0 on success. -On failure, \-1 is returned and the global variable -.Va errno -is set to indicate the error. +and frees the structure associated with +.Fa dirp . .Pp The .Fn fdclosedir function is equivalent to the .Fn closedir -function except that this function returns directory file descriptor instead of -closing it. +function except that it returns the file descriptor associated with +.Fa dirp +instead of closing it. .Pp The .Fn dirfd function -returns the integer file descriptor associated with the named -.Em directory stream , -see -.Xr open 2 . +returns the file descriptor associated with +.Fa dirp . 
.Sh EXAMPLES Sample code which searches a directory for entry ``name'' is: .Bd -literal -offset indent @@ -255,6 +227,38 @@ while ((dp = readdir(dirp)) != NULL) { (void)closedir(dirp); return (NOT_FOUND); .Ed +.Sh RETURN VALUES +The +.Fn opendir +and +.Fn fdopendir +functions return a pointer to the new +.Em directory stream +on success and +.Dv NULL +on failure. +.Pp +The +.Fn readdir +function returns a pointer to a directory entry on success and +.Dv NULL +on failure. +The +.Fn readdir_r +function returns 0 on success and an error number on failure. +.Pp +The +.Fn telldir +function returns a nonnegative value on success and -1 on failure. +.Pp +The +.Fn closedir +function returns 0 on success and -1 on failure. +The +.Fn fdclosedir +and +.Fn dirfd +functions return an open file descriptor on success and -1 on failure. .Sh ERRORS The .Fn opendir @@ -325,6 +329,16 @@ function may also fail and set .Va errno for any of the errors specified for the routine .Xr close 2 . +.Pp +The +.Fn dirfd +function will fail if: +.Bl -tag -width Er +.It Bq Er EINVAL +The +.Fa dirp +argument does not refer to a valid directory stream. +.El .Sh SEE ALSO .Xr close 2 , .Xr lseek 2 , diff --git a/lib/libc/gen/dirfd.c b/lib/libc/gen/dirfd.c index 85090bd4da6c..984e69b9ae9a 100644 --- a/lib/libc/gen/dirfd.c +++ b/lib/libc/gen/dirfd.c @@ -27,9 +27,9 @@ */ #include "namespace.h" -#include <sys/param.h> - #include <dirent.h> +#include <errno.h> +#include <stddef.h> #include "un-namespace.h" #include "gen-private.h" @@ -37,6 +37,9 @@ int dirfd(DIR *dirp) { - + if (dirp == NULL || _dirfd(dirp) < 0) { + errno = EINVAL; + return (-1); + } return (_dirfd(dirp)); } diff --git a/lib/libc/gen/err.c b/lib/libc/gen/err.c index 793bf7522e42..2628eb8186cc 100644 --- a/lib/libc/gen/err.c +++ b/lib/libc/gen/err.c @@ -163,14 +163,14 @@ _warn(const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); - vwarnc(errno, fmt, ap); + vwarnci(true, errno, fmt, ap); va_end(ap); } void vwarn(const char *fmt, va_list ap) { - vwarnc(errno, fmt, ap); + vwarnci(true, errno, fmt, ap); } void diff --git a/lib/libc/gen/exterr_cat_filenames.h b/lib/libc/gen/exterr_cat_filenames.h index 883dd98289cd..d55396a1b286 100644 --- a/lib/libc/gen/exterr_cat_filenames.h +++ b/lib/libc/gen/exterr_cat_filenames.h @@ -8,9 +8,11 @@ [EXTERR_CAT_GEOM] = "geom/geom_subr.c", [EXTERR_CAT_GEOMVFS] = "geom/geom_vfs.c", [EXTERR_CAT_FILEDESC] = "kern/kern_descrip.c", - [EXTERR_CAT_INOTIFY] = "kern/vfs_inotify.c", + [EXTERR_CAT_PROCEXIT] = "kern/kern_exit.c", + [EXTERR_CAT_FORK] = "kern/kern_fork.c", [EXTERR_CAT_GENIO] = "kern/sys_generic.c", [EXTERR_CAT_VFSBIO] = "kern/vfs_bio.c", + [EXTERR_CAT_INOTIFY] = "kern/vfs_inotify.c", [EXTERR_CAT_VFSSYSCALL] = "kern/vfs_syscalls.c", [EXTERR_CAT_BRIDGE] = "net/if_bridge.c", [EXTERR_CAT_SWAP] = "vm/swap_pager.c", diff --git a/lib/libc/gen/fdopendir.c b/lib/libc/gen/fdopendir.c index 9393cbe28f85..05e1a09fd00c 100644 --- a/lib/libc/gen/fdopendir.c +++ b/lib/libc/gen/fdopendir.c @@ -37,6 +37,7 @@ #include <errno.h> #include <fcntl.h> #include <stdbool.h> +#include <stddef.h> #include "un-namespace.h" #include "gen-private.h" diff --git a/lib/libc/gen/fmtcheck.c b/lib/libc/gen/fmtcheck.c index de889ad3421c..6d85e3889a4d 100644 --- a/lib/libc/gen/fmtcheck.c +++ b/lib/libc/gen/fmtcheck.c @@ -55,10 +55,8 @@ enum __e_fmtcheck_types { FMTCHECK_INTMAXTPOINTER, FMTCHECK_PTRDIFFTPOINTER, FMTCHECK_SIZETPOINTER, -#ifndef NO_FLOATING_POINT FMTCHECK_DOUBLE, FMTCHECK_LONGDOUBLE, -#endif FMTCHECK_STRING, FMTCHECK_WSTRING, FMTCHECK_WIDTH, @@ -185,7 +183,6 @@ get_next_format_from_precision(const char **pf) RETURN(pf,f,FMTCHECK_UNKNOWN); RETURN(pf,f,FMTCHECK_LONG); } -#ifndef NO_FLOATING_POINT if (strchr("aAeEfFgG", *f)) { switch (modifier) { case MOD_LONGDOUBLE: @@ -197,7 +194,6 @@ get_next_format_from_precision(const char **pf) RETURN(pf,f,FMTCHECK_UNKNOWN); } } -#endif if (*f == 'c') { switch (modifier) { case MOD_LONG: diff --git a/lib/libc/gen/opendir.c b/lib/libc/gen/opendir.c index 08d9eb10eaa2..4c08b7156055 100644 --- a/lib/libc/gen/opendir.c +++ b/lib/libc/gen/opendir.c @@ -42,6 +42,5 @@ DIR * opendir(const char *name) { - return (__opendir2(name, DTF_HIDEW | DTF_NODUP)); } diff --git a/lib/libc/gen/opendir2.c b/lib/libc/gen/opendir2.c index c5c2e662efd8..321cb2261b8c 100644 --- a/lib/libc/gen/opendir2.c +++ b/lib/libc/gen/opendir2.c @@ -68,7 +68,6 @@ __opendir2(const char *name, int flags) static int opendir_compar(const void *p1, const void *p2) { - return (strcmp((*(const struct dirent * const *)p1)->d_name, (*(const struct dirent * const *)p2)->d_name)); } diff --git a/lib/libc/gen/posix_spawn.3 b/lib/libc/gen/posix_spawn.3 index 278fee88463a..f7489890db31 100644 --- a/lib/libc/gen/posix_spawn.3 +++ b/lib/libc/gen/posix_spawn.3 @@ -455,15 +455,19 @@ action. 
.Xr posix_spawn_file_actions_destroy 3 , .Xr posix_spawn_file_actions_init 3 , .Xr posix_spawnattr_destroy 3 , +.Xr posix_spawnattr_getexecfd_np 3 , .Xr posix_spawnattr_getflags 3 , .Xr posix_spawnattr_getpgroup 3 , +.Xr posix_spawnattr_getprocdescp_np 3 , .Xr posix_spawnattr_getschedparam 3 , .Xr posix_spawnattr_getschedpolicy 3 , .Xr posix_spawnattr_getsigdefault 3 , .Xr posix_spawnattr_getsigmask 3 , .Xr posix_spawnattr_init 3 , +.Xr posix_spawnattr_setexecfd_np 3 , .Xr posix_spawnattr_setflags 3 , .Xr posix_spawnattr_setpgroup 3 , +.Xr posix_spawnattr_setprocdescp_np 3, .Xr posix_spawnattr_setschedparam 3 , .Xr posix_spawnattr_setschedpolicy 3 , .Xr posix_spawnattr_setsigdefault 3 , diff --git a/lib/libc/gen/posix_spawn.c b/lib/libc/gen/posix_spawn.c index a5b732696b8c..656c0f20f798 100644 --- a/lib/libc/gen/posix_spawn.c +++ b/lib/libc/gen/posix_spawn.c @@ -29,6 +29,7 @@ #include "namespace.h" #include <sys/param.h> #include <sys/procctl.h> +#include <sys/procdesc.h> #include <sys/queue.h> #include <sys/wait.h> @@ -37,6 +38,7 @@ #include <sched.h> #include <spawn.h> #include <signal.h> +#include <stdbool.h> #include <stdlib.h> #include <string.h> #include <unistd.h> @@ -50,6 +52,9 @@ struct __posix_spawnattr { int sa_schedpolicy; sigset_t sa_sigdefault; sigset_t sa_sigmask; + int sa_execfd; + int *sa_pdrfork_fdp; + int sa_pdflags; }; struct __posix_spawn_file_actions { @@ -231,7 +236,6 @@ struct posix_spawn_args { #define PSPAWN_STACK_ALIGN(sz) \ (((sz) + PSPAWN_STACK_ALIGNBYTES) & ~PSPAWN_STACK_ALIGNBYTES) -#if defined(__i386__) || defined(__amd64__) /* * Below we'll assume that _RFORK_THREAD_STACK_SIZE is appropriately aligned for * the posix_spawn() case where we do not end up calling execvpe and won't ever @@ -240,7 +244,6 @@ struct posix_spawn_args { #define _RFORK_THREAD_STACK_SIZE 4096 _Static_assert((_RFORK_THREAD_STACK_SIZE % PSPAWN_STACK_ALIGNMENT) == 0, "Inappropriate stack size alignment"); -#endif static int _posix_spawn_thr(void *data) @@ -260,7 +263,9 @@ _posix_spawn_thr(void *data) _exit(127); } envp = psa->envp != NULL ? psa->envp : environ; - if (psa->use_env_path) + if (psa->sa != NULL && (*(psa->sa))->sa_execfd != -1) + fexecve((*(psa->sa))->sa_execfd, psa->argv, envp); + else if (psa->use_env_path) __libc_execvpe(psa->path, psa->argv, envp); else _execve(psa->path, psa->argv, envp); @@ -278,12 +283,16 @@ do_posix_spawn(pid_t *pid, const char *path, { struct posix_spawn_args psa; pid_t p; -#ifdef _RFORK_THREAD_STACK_SIZE + int pfd; + bool do_pfd; char *stack; - size_t cnt, stacksz; + size_t stacksz; +#if defined(__i386__) || defined(__amd64__) stacksz = _RFORK_THREAD_STACK_SIZE; if (use_env_path) { + size_t cnt; + /* * We need to make sure we have enough room on the stack for the * potential alloca() in execvPe if it gets kicked back an @@ -310,6 +319,9 @@ do_posix_spawn(pid_t *pid, const char *path, return (ENOMEM); stacksz = (((uintptr_t)stack + stacksz) & ~PSPAWN_STACK_ALIGNBYTES) - (uintptr_t)stack; +#else + stack = NULL; + stacksz = 0; #endif psa.path = path; psa.fa = fa; @@ -319,6 +331,8 @@ do_posix_spawn(pid_t *pid, const char *path, psa.use_env_path = use_env_path; psa.error = 0; + do_pfd = sa != NULL && (*sa)->sa_pdrfork_fdp != NULL; + /* * Passing RFSPAWN to rfork(2) gives us effectively a vfork that drops * non-ignored signal handlers. We'll fall back to the slightly less @@ -330,22 +344,26 @@ do_posix_spawn(pid_t *pid, const char *path, * a special property of the libthr rtld locks ensure that * rtld is operational in the child. 
In particular, libthr * rtld locks do not store owner' tid into the lock word. + * + * x86 stores the return address on the stack, so rfork(2) + * cannot work as-is because the child would clobber the + * return address of the parent. Because of this, we must use + * rfork_thread instead. + * + * Every other architecture stores the return address in a + * register, the trivial rfork_thread() wrapper is provided + * for them. The only minor drawback is that the stack is + * temporarily allocated. */ -#ifdef _RFORK_THREAD_STACK_SIZE - /* - * x86 stores the return address on the stack, so rfork(2) cannot work - * as-is because the child would clobber the return address of the - * parent. Because of this, we must use rfork_thread instead while - * almost every other arch stores the return address in a register. - */ - p = rfork_thread(RFSPAWN, stack + stacksz, _posix_spawn_thr, &psa); + if (do_pfd) { + p = pdrfork_thread(&pfd, PD_CLOEXEC | (*sa)->sa_pdflags, + RFSPAWN, stack + stacksz, _posix_spawn_thr, &psa); + } else { + p = rfork_thread(RFSPAWN, stack + stacksz, _posix_spawn_thr, + &psa); + } free(stack); -#else - p = rfork(RFSPAWN); - if (p == 0) - /* _posix_spawn_thr does not return */ - _posix_spawn_thr(&psa); -#endif + /* * The above block should leave us in a state where we've either * succeeded and we're ready to process the results, or we need to @@ -353,6 +371,8 @@ do_posix_spawn(pid_t *pid, const char *path, */ if (p == -1 && errno == EINVAL) { + if (do_pfd) + return (EOPNOTSUPP); p = vfork(); if (p == 0) /* _posix_spawn_thr does not return */ @@ -360,12 +380,18 @@ do_posix_spawn(pid_t *pid, const char *path, } if (p == -1) return (errno); - if (psa.error != 0) + if (psa.error != 0) { /* Failed; ready to reap */ - _waitpid(p, NULL, WNOHANG); - else if (pid != NULL) + if (do_pfd) + (void)_close(pfd); + else + _waitpid(p, NULL, WNOHANG); + } else if (pid != NULL) { /* exec succeeded */ *pid = p; + if (do_pfd) + *((*sa)->sa_pdrfork_fdp) = pfd; + } return (psa.error); } @@ -578,6 +604,7 @@ posix_spawnattr_init(posix_spawnattr_t *ret) sa = calloc(1, sizeof(struct __posix_spawnattr)); if (sa == NULL) return (errno); + sa->sa_execfd = -1; /* Set defaults as specified by POSIX, cleared above */ *ret = sa; @@ -640,6 +667,23 @@ posix_spawnattr_getsigmask(const posix_spawnattr_t * __restrict sa, } int +posix_spawnattr_getexecfd_np(const posix_spawnattr_t * __restrict sa, + int * __restrict fdp) +{ + *fdp = (*sa)->sa_execfd; + return (0); +} + +int +posix_spawnattr_getprocdescp_np(const posix_spawnattr_t * __restrict sa, + int ** __restrict fdpp, int * __restrict pdrflagsp) +{ + *fdpp = (*sa)->sa_pdrfork_fdp; + *pdrflagsp = (*sa)->sa_pdflags; + return (0); +} + +int posix_spawnattr_setflags(posix_spawnattr_t *sa, short flags) { if ((flags & ~(POSIX_SPAWN_RESETIDS | POSIX_SPAWN_SETPGROUP | @@ -688,3 +732,20 @@ posix_spawnattr_setsigmask(posix_spawnattr_t * __restrict sa, (*sa)->sa_sigmask = *sigmask; return (0); } + +int +posix_spawnattr_setexecfd_np(posix_spawnattr_t * __restrict sa, + int execfd) +{ + (*sa)->sa_execfd = execfd; + return (0); +} + +int +posix_spawnattr_setprocdescp_np(const posix_spawnattr_t * __restrict sa, + int * __restrict fdp, int pdrflags) +{ + (*sa)->sa_pdrfork_fdp = fdp; + (*sa)->sa_pdflags = pdrflags; + return (0); +} diff --git a/lib/libc/gen/posix_spawnattr_getexecfd_np.3 b/lib/libc/gen/posix_spawnattr_getexecfd_np.3 new file mode 100644 index 000000000000..6edcfcdfb42f --- /dev/null +++ b/lib/libc/gen/posix_spawnattr_getexecfd_np.3 @@ -0,0 +1,87 @@ +.\" Copyright 
2026 The FreeBSD Foundation +.\" +.\" SPDX-License-Identifier: BSD-2-Clause +.\" +.\" This documentation was written by +.\" Konstantin Belousov <kib@FreeBSD.org> under sponsorship +.\" from the FreeBSD Foundation. +.\" +.Dd January 25, 2026 +.Dt POSIX_SPAWNATTR_GETEXECFD_NP 3 +.Os +.Sh NAME +.Nm posix_spawnattr_getexecfd_np , +.Nm posix_spawnattr_setexecfd_np +.Nd "get and set the spawn-execfd attribute of a spawn attributes object" +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In spawn.h +.Ft int +.Fo posix_spawnattr_getexecfd_np +.Fa "const posix_spawnattr_t *restrict attr" +.Fa "int *restrict fdp" +.Fc +.Ft int +.Fo posix_spawnattr_setexecfd_np +.Fa "posix_spawnattr_t *attr" +.Fa "int fd" +.Fc +.Sh DESCRIPTION +The +.Fn posix_spawnattr_getexecfd_np +function obtains the value of the spawn-execfd attribute from the +attributes object referenced by +.Fa attr . +.Pp +The +.Fn posix_spawnattr_setexecfd_np +function sets the spawn-execfd attribute in an initialized attributes +object referenced by +.Fa attr . +.Pp +The spawn-execfd attribute provides the file descriptor that is used +to execute the new image in the spawned process by the +.Xr posix_spawn 3 +family of functions. +If the attribute is set to a value other than \-1, it must be a valid +file descriptor which does not have the +.Dv O_CLOFORK +flag set. +Then, +.Fn posix_spawn +executes the executable image referenced by the file descriptor in the +newly created process, using the +.Xr fexecve 2 +system call. +In this case, the +.Fa path +argument to +.Fn posix_spawn +is ignored. +.Pp +The default value for the spawn-execfd attribute is \-1, which +means that the executed image is specified by the +.Fa path +argument to +.Fn posix_spawn . +.Sh RETURN VALUES +The +.Fn posix_spawnattr_getexecfd_np +and +.Fn posix_spawnattr_setexecfd_np +functions return zero. +.Sh SEE ALSO +.Xr posix_spawn 3 , +.Xr posix_spawnattr_destroy 3 , +.Xr posix_spawnattr_init 3 , +.Xr posix_spawnp 3 +.Sh STANDARDS +The +.Fn posix_spawnattr_getexecfd_np +and +.Fn posix_spawnattr_setexecfd_np +are +.Fx +extensions that first appeared in +.Fx 16.0 . diff --git a/lib/libc/gen/posix_spawnattr_getprocdescp_np.3 b/lib/libc/gen/posix_spawnattr_getprocdescp_np.3 new file mode 100644 index 000000000000..fab529d91313 --- /dev/null +++ b/lib/libc/gen/posix_spawnattr_getprocdescp_np.3 @@ -0,0 +1,94 @@ +.\" Copyright 2026 The FreeBSD Foundation +.\" +.\" SPDX-License-Identifier: BSD-2-Clause +.\" +.\" This documentation was written by +.\" Konstantin Belousov <kib@FreeBSD.org> under sponsorship +.\" from the FreeBSD Foundation. +.\" +.Dd January 26, 2026 +.Dt POSIX_SPAWNATTR_GETPROCDESCP_NP 3 +.Os +.Sh NAME +.Nm posix_spawnattr_getprocdescp_np , +.Nm posix_spawnattr_setprocdescp_np +.Nd "get and set the spawn-procdescp attribute of a spawn attributes object" +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In spawn.h +.Ft int +.Fo posix_spawnattr_getprocdescp_np +.Fa "const posix_spawnattr_t *restrict attr" +.Fa "int **restrict fdpp" +.Fa "int *restrict pdrflagsp" +.Fc +.Ft int +.Fo posix_spawnattr_setprocdescp_np +.Fa "posix_spawnattr_t *attr" +.Fa "int *restrict fdp" +.Fa "int pdrflags" +.Fc +.Sh DESCRIPTION +The +.Fn posix_spawnattr_getprocdescp_np +function obtains the value of the spawn-procdescp attribute from the +attributes object referenced by +.Fa attr . +.Pp +The +.Fn posix_spawnattr_setprocdescp_np +function sets the spawn-procdescp attribute in an initialized attributes +object referenced by +.Fa attr .
+.Pp +The spawn-procdescp attribute provides the location where the child process's +file descriptor will be stored after a successful spawn. +Setting the attribute to a non-NULL value implicitly requests the creation of +the file descriptor that references the child process. +It will be created by the +.Xr pdrfork 2 +system call, which is used instead of +.Xr fork/vfork/rfork 2 +when the attribute is not set to +.Dv NULL . +.Pp +If the attribute is set to a value other than +.Dv NULL , +it must be a valid pointer to a variable of +.Vt int +type, where the resulting descriptor will be stored. +The +.Fa pdrflags +argument specifies additional flags that are accepted by the +.Xr pdfork 2 +system call. +See its description for the list of valid flags. +Note that the +.Dv PD_CLOEXEC +flag is always set, preventing leakage of the process descriptor +into the newly created child. +.Pp +The default value for the spawn-procdescp attribute is +.Dv NULL , +which means that no process descriptor will be created. +.Sh RETURN VALUES +The +.Fn posix_spawnattr_getprocdescp_np +and +.Fn posix_spawnattr_setprocdescp_np +functions return zero. +.Sh SEE ALSO +.Xr posix_spawn 3 , +.Xr posix_spawnattr_destroy 3 , +.Xr posix_spawnattr_init 3 , +.Xr posix_spawnp 3 +.Sh STANDARDS +The +.Fn posix_spawnattr_getprocdescp_np +and +.Fn posix_spawnattr_setprocdescp_np +are +.Fx +extensions that first appeared in +.Fx 16.0 . diff --git a/lib/libc/gen/rewinddir.c b/lib/libc/gen/rewinddir.c index df829e98d138..5cead2adc6d4 100644 --- a/lib/libc/gen/rewinddir.c +++ b/lib/libc/gen/rewinddir.c @@ -43,7 +43,6 @@ void rewinddir(DIR *dirp) { - if (__isthreaded) _pthread_mutex_lock(&dirp->dd_lock); dirp->dd_flags &= ~__DTF_SKIPREAD; /* current contents are invalid */ diff --git a/lib/libc/gen/scandir.c b/lib/libc/gen/scandir.c index fb589f4b36b6..84b20fbafa03 100644 --- a/lib/libc/gen/scandir.c +++ b/lib/libc/gen/scandir.c @@ -232,14 +232,12 @@ scandirat(int dirfd, const char *dirname, struct dirent ***namelist, int alphasort(const struct dirent **d1, const struct dirent **d2) { - return (strcoll((*d1)->d_name, (*d2)->d_name)); } int versionsort(const struct dirent **d1, const struct dirent **d2) { - return (strverscmp((*d1)->d_name, (*d2)->d_name)); } diff --git a/lib/libc/gen/telldir.c b/lib/libc/gen/telldir.c index 1731cc4d7a2c..d71b7bed158c 100644 --- a/lib/libc/gen/telldir.c +++ b/lib/libc/gen/telldir.c @@ -53,7 +53,7 @@ telldir(DIR *dirp) if (__isthreaded) _pthread_mutex_lock(&dirp->dd_lock); - /* + /* * Outline: * 1) If the directory position fits in a packed structure, return that. * 2) Otherwise, see if it's already been recorded in the linked list @@ -95,7 +95,7 @@ LIST_INSERT_HEAD(&dirp->dd_td->td_locq, lp, loc_lqe); } ddloc.i.is_packed = 0; - /* + /* * Technically this assignment could overflow on 32-bit architectures, * but we would get ENOMEM long before that happens.
*/ diff --git a/lib/libc/gen/uexterr_format.c b/lib/libc/gen/uexterr_format.c index 8d3b458ca9f2..c1974f3c361a 100644 --- a/lib/libc/gen/uexterr_format.c +++ b/lib/libc/gen/uexterr_format.c @@ -9,6 +9,7 @@ */ #include <sys/param.h> +#include <sys/exterr_cat.h> #include <sys/exterrvar.h> #include <exterr.h> #include <stdbool.h> diff --git a/lib/libc/gen/uexterr_gettext.3 b/lib/libc/gen/uexterr_gettext.3 new file mode 100644 index 000000000000..576fe49d0c37 --- /dev/null +++ b/lib/libc/gen/uexterr_gettext.3 @@ -0,0 +1,71 @@ +.\" Copyright 2026 The FreeBSD Foundation +.\" +.\" SPDX-License-Identifier: BSD-2-Clause +.\" +.\" This documentation was written by +.\" Konstantin Belousov <kib@FreeBSD.org> under sponsorship +.\" from the FreeBSD Foundation. +.\" +.Dd February 17, 2026 +.Dt UEXTERR_GETTEXT 3 +.Os +.Sh NAME +.Nm uexterr_gettext +.Nd "get string representation of the current extended error" +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In exterr.h +.Ft int +.Fo uexterr_gettext +.Fa "char *buffer" +.Fa "size_t buffer_size" +.Fc +.Sh DESCRIPTION +The +.Nm +function fills the buffer pointed to by the +.Fa buffer +pointer with the formatted, null-terminated extended +error string, as reported by the +last system call that returned an extended error. +The capacity of the passed buffer is +.Fa buffer_size +bytes. +.Pp +Normally, applications should use the +.Xr err 3 +family of functions to display errors from system calls. +If this is not convenient or even not possible, +for instance for applications with an advanced user interface, the +.Nm +function can be used to fetch the string with the extended error. +.Pp +Note that most parts of the extended errors are directly provided by +the kernel, and as such cannot be localized. +.Pp +See +.Xr exterror 9 +for the description of the extended error facilities. +.Sh RETURN VALUES +The +.Nm +function returns zero. +There are currently no errors defined for the function, +which might change in the future. +.Pp +If any error condition is added, it will be reported by returning \-1 +and setting +.Va errno +to the corresponding value. +.Sh SEE ALSO +.Xr errno 3 , +.Xr err 3 , +.Xr exterror 9 +.Sh STANDARDS +The +.Nm +function is a +.Fx +extension that first appeared in +.Fx 15.0 . diff --git a/lib/libc/include/libc_private.h b/lib/libc/include/libc_private.h index db4cbc32be35..299629fce2ad 100644 --- a/lib/libc/include/libc_private.h +++ b/lib/libc/include/libc_private.h @@ -252,6 +252,7 @@ enum { INTERPOS__reserved0, /* was distribute_static_tls */ INTERPOS_pdfork, INTERPOS_uexterr_gettext, + INTERPOS_pdwait, INTERPOS_MAX }; diff --git a/lib/libc/net/getnetbydns.c b/lib/libc/net/getnetbydns.c index deca8c58fca5..b9cc29d5bfdb 100644 --- a/lib/libc/net/getnetbydns.c +++ b/lib/libc/net/getnetbydns.c @@ -304,6 +304,9 @@ _dns_getnetbyaddr(void *rval, void *cb_data, va_list ap) for (nn = 4, net2 = net; net2; net2 >>= 8) netbr[--nn] = net2 & 0xff; switch (nn) { + case 4: /* net was all-zero i.e. 0.0.0.0 */ + sprintf(qbuf, "0.0.0.0.in-addr.arpa"); + break; case 3: /* Class A */ sprintf(qbuf, "0.0.0.%u.in-addr.arpa", netbr[3]); break; diff --git a/lib/libc/net/sockatmark.3 b/lib/libc/net/sockatmark.3 index f499eb0b902d..2b6fe2a8907b 100644 --- a/lib/libc/net/sockatmark.3 +++ b/lib/libc/net/sockatmark.3 @@ -108,8 +108,8 @@ is a descriptor for a file, not a socket.
.Rs .%T "An Introductory 4.4BSD Interprocess Communication Tutorial" .%A Stuart Sechrest -.Re .%U https://docs.freebsd.org/44doc/psd/20.ipctut/paper.pdf +.Re .Rs .%T "An Advanced 4.4BSD Interprocess Communication Tutorial" .%A Samuel J. Leffler diff --git a/lib/libc/posix1e/mac.conf b/lib/libc/posix1e/mac.conf index 011143abf073..7da9bb8a9638 100644 --- a/lib/libc/posix1e/mac.conf +++ b/lib/libc/posix1e/mac.conf @@ -12,6 +12,7 @@ default_labels file ?biba,?lomac,?mls,?sebsd default_labels ifnet ?biba,?lomac,?mls,?sebsd +default_labels jail ? default_labels process ?biba,?lomac,?mls,?partition,?sebsd default_labels socket ?biba,?lomac,?mls diff --git a/lib/libc/posix1e/mac.conf.5 b/lib/libc/posix1e/mac.conf.5 index 98aa62dd83a7..99d75584a0d7 100644 --- a/lib/libc/posix1e/mac.conf.5 +++ b/lib/libc/posix1e/mac.conf.5 @@ -27,7 +27,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd July 25, 2015 +.Dd January 19. 2026 .Dt MAC.CONF 5 .Os .Sh NAME @@ -79,6 +79,7 @@ and # Default label set to be used by simple MAC applications default_labels file ?biba,?lomac,?mls,?sebsd +default_labels jail ? default_labels ifnet ?biba,?lomac,?mls,?sebsd default_labels process ?biba,?lomac,?mls,?partition,?sebsd default_labels socket ?biba,?lomac,?mls diff --git a/lib/libc/powerpcspe/Makefile.inc b/lib/libc/powerpcspe/Makefile.inc deleted file mode 100644 index 0b5879574480..000000000000 --- a/lib/libc/powerpcspe/Makefile.inc +++ /dev/null @@ -1,5 +0,0 @@ -CFLAGS+= -I${LIBC_SRCTOP}/powerpc - -# Long double is 64-bits -SRCS+=machdep_ldisd.c -SYM_MAPS+=${LIBC_SRCTOP}/powerpc/Symbol.map diff --git a/lib/libc/powerpcspe/gen/Makefile.inc b/lib/libc/powerpcspe/gen/Makefile.inc deleted file mode 100644 index 502f3dc231bf..000000000000 --- a/lib/libc/powerpcspe/gen/Makefile.inc +++ /dev/null @@ -1,5 +0,0 @@ -.include "${LIBC_SRCTOP}/powerpc/gen/Makefile.common" - -SRCS += fabs.S flt_rounds.c fpgetmask.c fpgetround.c \ - fpgetsticky.c fpsetmask.c fpsetround.c \ - _setjmp.S setjmp.S sigsetjmp.S diff --git a/lib/libc/powerpcspe/gen/_setjmp.S b/lib/libc/powerpcspe/gen/_setjmp.S deleted file mode 100644 index f282e0013f97..000000000000 --- a/lib/libc/powerpcspe/gen/_setjmp.S +++ /dev/null @@ -1,115 +0,0 @@ -/*- - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/* $NetBSD: _setjmp.S,v 1.1 1997/03/29 20:55:53 thorpej Exp $ */ - -#include <machine/asm.h> -/* - * C library -- _setjmp, _longjmp - * - * _longjmp(a,v) - * will generate a "return(v?v:1)" from the last call to - * _setjmp(a) - * by restoring registers from the stack. - * The previous signal state is NOT restored. - * - * jmpbuf layout: - * +------------+ - * | unused | - * +------------+ - * | unused | - * | | - * | (4 words) | - * | | - * +------------+ - * | saved regs | - * | ... | - */ - -ENTRY(_setjmp) - mflr %r11 - mfcr %r12 - evstdd %r1,24+0*8(%r3) - evstdd %r2,24+1*8(%r3) - evstdd %r11,24+2*8(%r3) - evstdd %r12,24+3*8(%r3) - evstdd %r13,24+4*8(%r3) - evstdd %r14,24+5*8(%r3) - evstdd %r15,24+6*8(%r3) - evstdd %r16,24+7*8(%r3) - evstdd %r17,24+8*8(%r3) - evstdd %r18,24+9*8(%r3) - evstdd %r19,24+10*8(%r3) - evstdd %r20,24+11*8(%r3) - evstdd %r21,24+12*8(%r3) - evstdd %r22,24+13*8(%r3) - evstdd %r23,24+14*8(%r3) - evstdd %r24,24+15*8(%r3) - evstdd %r25,24+16*8(%r3) - evstdd %r26,24+17*8(%r3) - evstdd %r27,24+18*8(%r3) - evstdd %r28,24+19*8(%r3) - evstdd %r29,24+20*8(%r3) - evstdd %r30,24+21*8(%r3) - evstdd %r31,24+22*8(%r3) - - li %r3,0 - blr -END(_setjmp) - -ENTRY(_longjmp) - evldd %r1,24+0*8(%r3) - evldd %r2,24+1*8(%r3) - evldd %r11,24+2*8(%r3) - evldd %r12,24+3*8(%r3) - evldd %r13,24+4*8(%r3) - evldd %r14,24+5*8(%r3) - evldd %r15,24+6*8(%r3) - evldd %r16,24+7*8(%r3) - evldd %r17,24+8*8(%r3) - evldd %r18,24+9*8(%r3) - evldd %r19,24+10*8(%r3) - evldd %r20,24+11*8(%r3) - evldd %r21,24+12*8(%r3) - evldd %r22,24+13*8(%r3) - evldd %r23,24+14*8(%r3) - evldd %r24,24+15*8(%r3) - evldd %r25,24+16*8(%r3) - evldd %r26,24+17*8(%r3) - evldd %r27,24+18*8(%r3) - evldd %r28,24+19*8(%r3) - evldd %r29,24+20*8(%r3) - evldd %r30,24+21*8(%r3) - evldd %r31,24+22*8(%r3) - - mtlr %r11 - mtcr %r12 - or. %r3,%r4,%r4 - bnelr - li %r3,1 - blr -END(_longjmp) - - .section .note.GNU-stack,"",%progbits diff --git a/lib/libc/powerpcspe/gen/flt_rounds.c b/lib/libc/powerpcspe/gen/flt_rounds.c deleted file mode 100644 index 26dfca0e0e3a..000000000000 --- a/lib/libc/powerpcspe/gen/flt_rounds.c +++ /dev/null @@ -1,54 +0,0 @@ -/* $NetBSD: flt_rounds.c,v 1.4.10.3 2002/03/22 20:41:53 nathanw Exp $ */ - -/* - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Mark Brinicombe - * for the NetBSD Project. - * 4. 
The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <sys/types.h> -#include <machine/float.h> -#include <machine/spr.h> - -#ifndef _SOFT_FLOAT -static const int map[] = { - 1, /* round to nearest */ - 0, /* round to zero */ - 2, /* round to positive infinity */ - 3 /* round to negative infinity */ -}; - -int -__flt_rounds() -{ - uint32_t fpscr; - - __asm__ __volatile("mfspr %0, %1" : "=r"(fpscr) : "K"(SPR_SPEFSCR)); - return map[(fpscr & 0x03)]; -} -#endif diff --git a/lib/libc/powerpcspe/gen/fpgetmask.c b/lib/libc/powerpcspe/gen/fpgetmask.c deleted file mode 100644 index f7679be4ca54..000000000000 --- a/lib/libc/powerpcspe/gen/fpgetmask.c +++ /dev/null @@ -1,46 +0,0 @@ -/* $NetBSD: fpgetmask.c,v 1.3 2002/01/13 21:45:47 thorpej Exp $ */ - -/* - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Dan Winship. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#include <sys/types.h> -#include <machine/spr.h> -#include <ieeefp.h> - -#ifndef _SOFT_FLOAT -fp_except_t -fpgetmask() -{ - uint32_t fpscr; - - __asm__ __volatile("mfspr %0, %1" : "=r"(fpscr) : "K"(SPR_SPEFSCR)); - return ((fp_except_t)((fpscr >> 2) & 0x1f)); -} -#endif diff --git a/lib/libc/powerpcspe/gen/fpgetround.c b/lib/libc/powerpcspe/gen/fpgetround.c deleted file mode 100644 index 9c01bcbaf327..000000000000 --- a/lib/libc/powerpcspe/gen/fpgetround.c +++ /dev/null @@ -1,46 +0,0 @@ -/* $NetBSD: fpgetround.c,v 1.3 2002/01/13 21:45:47 thorpej Exp $ */ - -/* - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Dan Winship. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include <sys/types.h> -#include <machine/spr.h> -#include <ieeefp.h> - -#ifndef _SOFT_FLOAT -fp_rnd_t -fpgetround() -{ - uint32_t fpscr; - - __asm__ __volatile("mfspr %0, %1" : "=r"(fpscr) : "K"(SPR_SPEFSCR)); - return ((fp_rnd_t)(fpscr & 0x3)); -} -#endif diff --git a/lib/libc/powerpcspe/gen/fpgetsticky.c b/lib/libc/powerpcspe/gen/fpgetsticky.c deleted file mode 100644 index a97c27296cab..000000000000 --- a/lib/libc/powerpcspe/gen/fpgetsticky.c +++ /dev/null @@ -1,48 +0,0 @@ -/* $NetBSD: fpgetsticky.c,v 1.3 2002/01/13 21:45:48 thorpej Exp $ */ - -/* - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Dan Winship. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - - -#include "namespace.h" - -#include <sys/types.h> -#include <machine/spr.h> -#include <ieeefp.h> - -#ifndef _SOFT_FLOAT -fp_except_t -fpgetsticky() -{ - uint32_t fpscr; - - __asm__ __volatile("mfspr %0, %1" : "=r"(fpscr) : "K"(SPR_SPEFSCR)); - return ((fp_except_t)((fpscr >> 25) & 0x1f)); -} -#endif diff --git a/lib/libc/powerpcspe/gen/fpsetmask.c b/lib/libc/powerpcspe/gen/fpsetmask.c deleted file mode 100644 index a7a2569df905..000000000000 --- a/lib/libc/powerpcspe/gen/fpsetmask.c +++ /dev/null @@ -1,50 +0,0 @@ -/* $NetBSD: fpsetmask.c,v 1.3 2002/01/13 21:45:48 thorpej Exp $ */ - -/* - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Dan Winship. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#include <sys/types.h> -#include <machine/spr.h> -#include <ieeefp.h> - -#ifndef _SOFT_FLOAT -fp_except_t -fpsetmask(fp_except_t mask) -{ - uint32_t fpscr; - fp_except_t old; - - __asm__ __volatile("mfspr %0, %1" : "=r"(fpscr) : "K"(SPR_SPEFSCR)); - old = (fp_except_t)((fpscr >> 2) & 0x1f); - fpscr = (fpscr & 0xffffff83) | ((mask & 0x1f) << 2); - __asm__ __volatile("mtspr %1,%0;isync" :: "r"(fpscr), "K"(SPR_SPEFSCR)); - return (old); -} -#endif diff --git a/lib/libc/powerpcspe/gen/fpsetround.c b/lib/libc/powerpcspe/gen/fpsetround.c deleted file mode 100644 index 2280e190b2f9..000000000000 --- a/lib/libc/powerpcspe/gen/fpsetround.c +++ /dev/null @@ -1,50 +0,0 @@ -/* $NetBSD: fpsetround.c,v 1.3 2002/01/13 21:45:48 thorpej Exp $ */ - -/* - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Dan Winship. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include <sys/types.h> -#include <machine/spr.h> -#include <ieeefp.h> - -#ifndef _SOFT_FLOAT -fp_rnd_t -fpsetround(fp_rnd_t rnd_dir) -{ - uint32_t fpscr; - fp_rnd_t old; - - __asm__ __volatile("mfspr %0, %1" : "=r"(fpscr) : "K"(SPR_SPEFSCR) ); - old = (fp_rnd_t)(fpscr & 0x3); - fpscr = (fpscr & 0xfffffffc) | rnd_dir; - __asm__ __volatile("mtspr %1, %0;isync" :: "r"(fpscr), "K"(SPR_SPEFSCR)); - return (old); -} -#endif diff --git a/lib/libc/powerpcspe/gen/setjmp.S b/lib/libc/powerpcspe/gen/setjmp.S deleted file mode 100644 index 1bd3edcf5239..000000000000 --- a/lib/libc/powerpcspe/gen/setjmp.S +++ /dev/null @@ -1,136 +0,0 @@ -/*- - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/* $NetBSD: setjmp.S,v 1.3 1998/10/03 12:30:38 tsubai Exp $ */ - -#include <machine/asm.h> -#include <sys/syscall.h> - -/* - * C library -- setjmp, longjmp - * - * longjmp(a,v) - * will generate a "return(v?v:1)" from the last call to - * setjmp(a) - * by restoring registers from the stack. - * The previous signal state is restored. - * - * jmpbuf layout: - * +------------+ - * | unused | - * +------------+ - * | sig state | - * | | - * | (4 words) | - * | | - * +------------+ - * | saved regs | - * | ... | - */ - -ENTRY(setjmp) - mr %r6,%r3 - li %r3,1 /* SIG_BLOCK, but doesn't matter */ - /* since set == NULL */ - li %r4,0 /* set = NULL */ - mr %r5,%r6 /* &oset */ - addi %r5,%r5,4 - li %r0, SYS_sigprocmask /*sigprocmask(SIG_BLOCK, NULL, &oset)*/ - sc /*assume no error XXX */ - mflr %r11 /* r11 <- link reg */ - mfcr %r12 /* r12 <- condition reg */ - mr %r10,%r1 /* r10 <- stackptr */ - mr %r9,%r2 /* r9 <- global ptr */ - evstdd %r9,24+0*8(%r6) - evstdd %r10,24+1*8(%r6) - evstdd %r11,24+2*8(%r6) - evstdd %r12,24+3*8(%r6) - evstdd %r13,24+4*8(%r6) - evstdd %r14,24+5*8(%r6) - evstdd %r15,24+6*8(%r6) - evstdd %r16,24+7*8(%r6) - evstdd %r17,24+8*8(%r6) - evstdd %r18,24+9*8(%r6) - evstdd %r19,24+10*8(%r6) - evstdd %r20,24+11*8(%r6) - evstdd %r21,24+12*8(%r6) - evstdd %r22,24+13*8(%r6) - evstdd %r23,24+14*8(%r6) - evstdd %r24,24+15*8(%r6) - evstdd %r25,24+16*8(%r6) - evstdd %r26,24+17*8(%r6) - evstdd %r27,24+18*8(%r6) - evstdd %r28,24+19*8(%r6) - evstdd %r29,24+20*8(%r6) - evstdd %r30,24+21*8(%r6) - evstdd %r31,24+22*8(%r6) - - li %r3,0 /* return (0) */ - blr -END(setjmp) - - WEAK_REFERENCE(CNAME(__longjmp), longjmp) -ENTRY(__longjmp) - evldd %r9,24+0*8(%r3) - evldd %r10,24+1*8(%r3) - evldd %r11,24+2*8(%r3) - evldd %r12,24+3*8(%r3) - evldd %r13,24+4*8(%r3) - evldd %r14,24+5*8(%r3) - evldd %r15,24+6*8(%r3) - evldd %r16,24+7*8(%r3) - evldd %r17,24+8*8(%r3) - evldd %r18,24+9*8(%r3) - evldd %r19,24+10*8(%r3) - evldd %r20,24+11*8(%r3) - evldd %r21,24+12*8(%r3) - evldd %r22,24+13*8(%r3) - evldd %r23,24+14*8(%r3) - evldd %r24,24+15*8(%r3) - evldd %r25,24+16*8(%r3) - evldd %r26,24+17*8(%r3) - evldd %r27,24+18*8(%r3) - evldd %r28,24+19*8(%r3) - evldd %r29,24+20*8(%r3) - evldd %r30,24+21*8(%r3) - evldd %r31,24+22*8(%r3) - - mr %r6,%r4 /* save val param */ - mtlr %r11 /* r11 -> link reg */ - mtcr %r12 /* r12 -> condition reg */ - mr %r1,%r10 /* r10 -> stackptr */ - mr %r4,%r3 - li %r3,3 /* SIG_SETMASK */ - addi %r4,%r4,4 /* &set */ - li %r5,0 /* oset = NULL */ - li %r0,SYS_sigprocmask /* sigprocmask(SIG_SET, &set, NULL) */ - sc /* assume no error XXX */ - or. 
%r3,%r6,%r6 - bnelr - li %r3,1 - blr -END(__longjmp) - - .section .note.GNU-stack,"",%progbits diff --git a/lib/libc/powerpcspe/gen/sigsetjmp.S b/lib/libc/powerpcspe/gen/sigsetjmp.S deleted file mode 100644 index 45c85c3fce23..000000000000 --- a/lib/libc/powerpcspe/gen/sigsetjmp.S +++ /dev/null @@ -1,148 +0,0 @@ -/*- - * Copyright (c) 2016 Justin Hibbits - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/* $NetBSD: sigsetjmp.S,v 1.4 1998/10/03 12:30:38 tsubai Exp $ */ - -#include <machine/asm.h> -/* - * C library -- sigsetjmp, siglongjmp - * - * siglongjmp(a,v) - * will generate a "return(v?v:1)" from the last call to - * sigsetjmp(a, savemask) - * by restoring registers from the stack. - * The previous signal state is restored if savemask is non-zero - * - * jmpbuf layout: - * +------------+ - * | savemask | - * +------------+ - * | sig state | - * | | - * | (4 words) | - * | | - * +------------+ - * | saved regs | - * | ... | - */ - - -#include <sys/syscall.h> - -ENTRY(sigsetjmp) - mr %r6,%r3 - stw %r4,0(%r3) - or. 
%r7,%r4,%r4 - beq 1f - li %r3,1 /* SIG_BLOCK, but doesn't matter */ - /* since set == NULL */ - li %r4,0 /* set = NULL */ - mr %r5,%r6 /* &oset */ - addi %r5,%r5,4 - li %r0, SYS_sigprocmask /* sigprocmask(SIG_BLOCK, NULL, &oset)*/ - sc /* assume no error XXX */ -1: - mflr %r11 - mfcr %r12 - mr %r10,%r1 - mr %r9,%r2 - - /* FPRs */ - evstdd %r9,24+0*8(%r6) - evstdd %r10,24+1*8(%r6) - evstdd %r11,24+2*8(%r6) - evstdd %r12,24+3*8(%r6) - evstdd %r13,24+4*8(%r6) - evstdd %r14,24+5*8(%r6) - evstdd %r15,24+6*8(%r6) - evstdd %r16,24+7*8(%r6) - evstdd %r17,24+8*8(%r6) - evstdd %r18,24+9*8(%r6) - evstdd %r19,24+10*8(%r6) - evstdd %r20,24+11*8(%r6) - evstdd %r21,24+12*8(%r6) - evstdd %r22,24+13*8(%r6) - evstdd %r23,24+14*8(%r6) - evstdd %r24,24+15*8(%r6) - evstdd %r25,24+16*8(%r6) - evstdd %r26,24+17*8(%r6) - evstdd %r27,24+18*8(%r6) - evstdd %r28,24+19*8(%r6) - evstdd %r29,24+20*8(%r6) - evstdd %r30,24+21*8(%r6) - evstdd %r31,24+22*8(%r6) - - li %r3,0 - blr -END(sigsetjmp) - -ENTRY(siglongjmp) - - /* FPRs */ - evldd %r9,24+0*8(%r3) - evldd %r10,24+1*8(%r3) - evldd %r11,24+2*8(%r3) - evldd %r12,24+3*8(%r3) - evldd %r13,24+4*8(%r3) - evldd %r14,24+5*8(%r3) - evldd %r15,24+6*8(%r3) - evldd %r16,24+7*8(%r3) - evldd %r17,24+8*8(%r3) - evldd %r18,24+9*8(%r3) - evldd %r19,24+10*8(%r3) - evldd %r20,24+11*8(%r3) - evldd %r21,24+12*8(%r3) - evldd %r22,24+13*8(%r3) - evldd %r23,24+14*8(%r3) - evldd %r24,24+15*8(%r3) - evldd %r25,24+16*8(%r3) - evldd %r26,24+17*8(%r3) - evldd %r27,24+18*8(%r3) - evldd %r28,24+19*8(%r3) - evldd %r29,24+20*8(%r3) - evldd %r30,24+21*8(%r3) - evldd %r31,24+22*8(%r3) - - lwz %r7,0(%r3) - mr %r6,%r4 - mtlr %r11 - mtcr %r12 - mr %r1,%r10 - or. %r7,%r7,%r7 - beq 1f - mr %r4,%r3 - li %r3,3 /* SIG_SETMASK */ - addi %r4,%r4,4 /* &set */ - li %r5,0 /* oset = NULL */ - li %r0,SYS_sigprocmask /* sigprocmask(SIG_SET, &set, NULL) */ - sc /* assume no error XXX */ -1: - or. %r3,%r6,%r6 - bnelr - li %r3,1 - blr -END(siglongjmp) - - .section .note.GNU-stack,"",%progbits diff --git a/lib/libc/powerpcspe/softfloat/milieu.h b/lib/libc/powerpcspe/softfloat/milieu.h deleted file mode 100644 index 6139aa58b982..000000000000 --- a/lib/libc/powerpcspe/softfloat/milieu.h +++ /dev/null @@ -1,48 +0,0 @@ -/* $NetBSD: milieu.h,v 1.1 2000/12/29 20:13:54 bjh21 Exp $ */ - -/* -=============================================================================== - -This C header file is part of the SoftFloat IEC/IEEE Floating-point -Arithmetic Package, Release 2a. - -Written by John R. Hauser. This work was made possible in part by the -International Computer Science Institute, located at Suite 600, 1947 Center -Street, Berkeley, California 94704. Funding was partially provided by the -National Science Foundation under grant MIP-9311980. The original version -of this code was written as part of a project to build a fixed-point vector -processor in collaboration with the University of California at Berkeley, -overseen by Profs. Nelson Morgan and John Wawrzynek. More information -is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ -arithmetic/SoftFloat.html'. - -THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort -has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT -TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO -PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY -AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. 
- -Derivative works are acceptable, even for commercial purposes, so long as -(1) they include prominent notice that the work is derivative, and (2) they -include prominent notice akin to these four paragraphs for those parts of -this code that are retained. - -=============================================================================== -*/ - -/* -------------------------------------------------------------------------------- -Include common integer types and flags. -------------------------------------------------------------------------------- -*/ -#include "powerpc-gcc.h" - -/* -------------------------------------------------------------------------------- -Symbolic Boolean literals. -------------------------------------------------------------------------------- -*/ -enum { - FALSE = 0, - TRUE = 1 -}; diff --git a/lib/libc/powerpcspe/softfloat/powerpc-gcc.h b/lib/libc/powerpcspe/softfloat/powerpc-gcc.h deleted file mode 100644 index d11198866e39..000000000000 --- a/lib/libc/powerpcspe/softfloat/powerpc-gcc.h +++ /dev/null @@ -1,91 +0,0 @@ -/* $NetBSD: arm-gcc.h,v 1.2 2001/02/21 18:09:25 bjh21 Exp $ */ - -/* -------------------------------------------------------------------------------- -One of the macros `BIGENDIAN' or `LITTLEENDIAN' must be defined. -------------------------------------------------------------------------------- -*/ -#define BIGENDIAN - -/* -------------------------------------------------------------------------------- -The macro `BITS64' can be defined to indicate that 64-bit integer types are -supported by the compiler. -------------------------------------------------------------------------------- -*/ -#define BITS64 - -/* -------------------------------------------------------------------------------- -Each of the following `typedef's defines the most convenient type that holds -integers of at least as many bits as specified. For example, `uint8' should -be the most convenient type that can hold unsigned integers of as many as -8 bits. The `flag' type must be able to hold either a 0 or 1. For most -implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed -to the same as `int'. -------------------------------------------------------------------------------- -*/ -typedef int flag; -typedef unsigned int uint8; -typedef int int8; -typedef unsigned int uint16; -typedef int int16; -typedef unsigned int uint32; -typedef signed int int32; -#ifdef BITS64 -typedef unsigned long long int uint64; -typedef signed long long int int64; -#endif - -/* -------------------------------------------------------------------------------- -Each of the following `typedef's defines a type that holds integers -of _exactly_ the number of bits specified. For instance, for most -implementation of C, `bits16' and `sbits16' should be `typedef'ed to -`unsigned short int' and `signed short int' (or `short int'), respectively. -------------------------------------------------------------------------------- -*/ -typedef unsigned char bits8; -typedef signed char sbits8; -typedef unsigned short int bits16; -typedef signed short int sbits16; -typedef unsigned int bits32; -typedef signed int sbits32; -#ifdef BITS64 -typedef unsigned long long int bits64; -typedef signed long long int sbits64; -#endif - -#ifdef BITS64 -/* -------------------------------------------------------------------------------- -The `LIT64' macro takes as its argument a textual integer literal and -if necessary ``marks'' the literal as having a 64-bit integer type. 
-For example, the GNU C Compiler (`gcc') requires that 64-bit literals be -appended with the letters `LL' standing for `long long', which is `gcc's -name for the 64-bit integer type. Some compilers may allow `LIT64' to be -defined as the identity macro: `#define LIT64( a ) a'. -------------------------------------------------------------------------------- -*/ -#define LIT64( a ) a##LL -#endif - -/* -------------------------------------------------------------------------------- -The macro `INLINE' can be used before functions that should be inlined. If -a compiler does not support explicit inlining, this macro should be defined -to be `static'. -------------------------------------------------------------------------------- -*/ -#define INLINE static __inline - -/* -------------------------------------------------------------------------------- -The ARM FPA is odd in that it stores doubles high-order word first, no matter -what the endianness of the CPU. VFP is sane. -------------------------------------------------------------------------------- -*/ -#if defined(SOFTFLOAT_FOR_GCC) -#define FLOAT64_DEMANGLE(a) (a) -#define FLOAT64_MANGLE(a) (a) -#endif diff --git a/lib/libc/powerpcspe/softfloat/softfloat.h b/lib/libc/powerpcspe/softfloat/softfloat.h deleted file mode 100644 index b20cb3e7aa00..000000000000 --- a/lib/libc/powerpcspe/softfloat/softfloat.h +++ /dev/null @@ -1,306 +0,0 @@ -/* $NetBSD: softfloat.h,v 1.6 2002/05/12 13:12:46 bjh21 Exp $ */ - -/* This is a derivative work. */ - -/* -=============================================================================== - -This C header file is part of the SoftFloat IEC/IEEE Floating-point -Arithmetic Package, Release 2a. - -Written by John R. Hauser. This work was made possible in part by the -International Computer Science Institute, located at Suite 600, 1947 Center -Street, Berkeley, California 94704. Funding was partially provided by the -National Science Foundation under grant MIP-9311980. The original version -of this code was written as part of a project to build a fixed-point vector -processor in collaboration with the University of California at Berkeley, -overseen by Profs. Nelson Morgan and John Wawrzynek. More information -is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ -arithmetic/SoftFloat.html'. - -THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort -has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT -TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO -PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY -AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. - -Derivative works are acceptable, even for commercial purposes, so long as -(1) they include prominent notice that the work is derivative, and (2) they -include prominent notice akin to these four paragraphs for those parts of -this code that are retained. - -=============================================================================== -*/ - -/* -------------------------------------------------------------------------------- -The macro `FLOATX80' must be defined to enable the extended double-precision -floating-point format `floatx80'. If this macro is not defined, the -`floatx80' type will not be defined, and none of the functions that either -input or output the `floatx80' type will be defined. The same applies to -the `FLOAT128' macro and the quadruple-precision format `float128'. 
-------------------------------------------------------------------------------- -*/ -/* #define FLOATX80 */ -/* #define FLOAT128 */ - -#include <machine/ieeefp.h> - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE floating-point types. -------------------------------------------------------------------------------- -*/ -typedef unsigned int float32; -typedef unsigned long long float64; -#ifdef FLOATX80 -typedef struct { - unsigned short high; - unsigned long long low; -} floatx80; -#endif -#ifdef FLOAT128 -typedef struct { - unsigned long long high, low; -} float128; -#endif - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE floating-point underflow tininess-detection mode. -------------------------------------------------------------------------------- -*/ -#ifndef SOFTFLOAT_FOR_GCC -extern int8 float_detect_tininess; -#endif -enum { - float_tininess_after_rounding = 0, - float_tininess_before_rounding = 1 -}; - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE floating-point rounding mode. -------------------------------------------------------------------------------- -*/ -extern fp_rnd_t float_rounding_mode; -enum { - float_round_nearest_even = FP_RN, - float_round_to_zero = FP_RZ, - float_round_down = FP_RM, - float_round_up = FP_RP -}; - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE floating-point exception flags. -------------------------------------------------------------------------------- -*/ -typedef fp_except_t fp_except; - -extern fp_except float_exception_flags; -extern fp_except float_exception_mask; -enum { - float_flag_inexact = FP_X_IMP, - float_flag_underflow = FP_X_UFL, - float_flag_overflow = FP_X_OFL, - float_flag_divbyzero = FP_X_DZ, - float_flag_invalid = FP_X_INV -}; - -/* -------------------------------------------------------------------------------- -Routine to raise any or all of the software IEC/IEEE floating-point -exception flags. -------------------------------------------------------------------------------- -*/ -void float_raise( fp_except ); - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE integer-to-floating-point conversion routines. -------------------------------------------------------------------------------- -*/ -float32 int32_to_float32( int ); -float64 int32_to_float64( int ); -#ifdef FLOATX80 -floatx80 int32_to_floatx80( int ); -#endif -#ifdef FLOAT128 -float128 int32_to_float128( int ); -#endif -float32 int64_to_float32( long long ); -float64 int64_to_float64( long long ); -#ifdef FLOATX80 -floatx80 int64_to_floatx80( long long ); -#endif -#ifdef FLOAT128 -float128 int64_to_float128( long long ); -#endif - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE single-precision conversion routines. 
-------------------------------------------------------------------------------- -*/ -int float32_to_int32( float32 ); -int float32_to_int32_round_to_zero( float32 ); -unsigned int float32_to_uint32_round_to_zero( float32 ); -long long float32_to_int64( float32 ); -long long float32_to_int64_round_to_zero( float32 ); -float64 float32_to_float64( float32 ); -#ifdef FLOATX80 -floatx80 float32_to_floatx80( float32 ); -#endif -#ifdef FLOAT128 -float128 float32_to_float128( float32 ); -#endif - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE single-precision operations. -------------------------------------------------------------------------------- -*/ -float32 float32_round_to_int( float32 ); -float32 float32_add( float32, float32 ); -float32 float32_sub( float32, float32 ); -float32 float32_mul( float32, float32 ); -float32 float32_div( float32, float32 ); -float32 float32_rem( float32, float32 ); -float32 float32_sqrt( float32 ); -int float32_eq( float32, float32 ); -int float32_le( float32, float32 ); -int float32_lt( float32, float32 ); -int float32_eq_signaling( float32, float32 ); -int float32_le_quiet( float32, float32 ); -int float32_lt_quiet( float32, float32 ); -#ifndef SOFTFLOAT_FOR_GCC -int float32_is_signaling_nan( float32 ); -#endif - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE double-precision conversion routines. -------------------------------------------------------------------------------- -*/ -int float64_to_int32( float64 ); -int float64_to_int32_round_to_zero( float64 ); -unsigned int float64_to_uint32_round_to_zero( float64 ); -long long float64_to_int64( float64 ); -long long float64_to_int64_round_to_zero( float64 ); -float32 float64_to_float32( float64 ); -#ifdef FLOATX80 -floatx80 float64_to_floatx80( float64 ); -#endif -#ifdef FLOAT128 -float128 float64_to_float128( float64 ); -#endif - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE double-precision operations. -------------------------------------------------------------------------------- -*/ -float64 float64_round_to_int( float64 ); -float64 float64_add( float64, float64 ); -float64 float64_sub( float64, float64 ); -float64 float64_mul( float64, float64 ); -float64 float64_div( float64, float64 ); -float64 float64_rem( float64, float64 ); -float64 float64_sqrt( float64 ); -int float64_eq( float64, float64 ); -int float64_le( float64, float64 ); -int float64_lt( float64, float64 ); -int float64_eq_signaling( float64, float64 ); -int float64_le_quiet( float64, float64 ); -int float64_lt_quiet( float64, float64 ); -#ifndef SOFTFLOAT_FOR_GCC -int float64_is_signaling_nan( float64 ); -#endif - -#ifdef FLOATX80 - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE extended double-precision conversion routines. -------------------------------------------------------------------------------- -*/ -int floatx80_to_int32( floatx80 ); -int floatx80_to_int32_round_to_zero( floatx80 ); -long long floatx80_to_int64( floatx80 ); -long long floatx80_to_int64_round_to_zero( floatx80 ); -float32 floatx80_to_float32( floatx80 ); -float64 floatx80_to_float64( floatx80 ); -#ifdef FLOAT128 -float128 floatx80_to_float128( floatx80 ); -#endif - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE extended double-precision rounding precision. 
Valid -values are 32, 64, and 80. -------------------------------------------------------------------------------- -*/ -extern int floatx80_rounding_precision; - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE extended double-precision operations. -------------------------------------------------------------------------------- -*/ -floatx80 floatx80_round_to_int( floatx80 ); -floatx80 floatx80_add( floatx80, floatx80 ); -floatx80 floatx80_sub( floatx80, floatx80 ); -floatx80 floatx80_mul( floatx80, floatx80 ); -floatx80 floatx80_div( floatx80, floatx80 ); -floatx80 floatx80_rem( floatx80, floatx80 ); -floatx80 floatx80_sqrt( floatx80 ); -int floatx80_eq( floatx80, floatx80 ); -int floatx80_le( floatx80, floatx80 ); -int floatx80_lt( floatx80, floatx80 ); -int floatx80_eq_signaling( floatx80, floatx80 ); -int floatx80_le_quiet( floatx80, floatx80 ); -int floatx80_lt_quiet( floatx80, floatx80 ); -int floatx80_is_signaling_nan( floatx80 ); - -#endif - -#ifdef FLOAT128 - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE quadruple-precision conversion routines. -------------------------------------------------------------------------------- -*/ -int float128_to_int32( float128 ); -int float128_to_int32_round_to_zero( float128 ); -long long float128_to_int64( float128 ); -long long float128_to_int64_round_to_zero( float128 ); -float32 float128_to_float32( float128 ); -float64 float128_to_float64( float128 ); -#ifdef FLOATX80 -floatx80 float128_to_floatx80( float128 ); -#endif - -/* -------------------------------------------------------------------------------- -Software IEC/IEEE quadruple-precision operations. -------------------------------------------------------------------------------- -*/ -float128 float128_round_to_int( float128 ); -float128 float128_add( float128, float128 ); -float128 float128_sub( float128, float128 ); -float128 float128_mul( float128, float128 ); -float128 float128_div( float128, float128 ); -float128 float128_rem( float128, float128 ); -float128 float128_sqrt( float128 ); -int float128_eq( float128, float128 ); -int float128_le( float128, float128 ); -int float128_lt( float128, float128 ); -int float128_eq_signaling( float128, float128 ); -int float128_le_quiet( float128, float128 ); -int float128_lt_quiet( float128, float128 ); -int float128_is_signaling_nan( float128 ); - -#endif - diff --git a/lib/libc/resolv/res_init.c b/lib/libc/resolv/res_init.c index 5a2fce013c8c..b21a3fa1670f 100644 --- a/lib/libc/resolv/res_init.c +++ b/lib/libc/resolv/res_init.c @@ -118,21 +118,12 @@ static u_int32_t net_mask(struct in_addr); /*% * Set up default settings. If the configuration file exist, the values * there will have precedence. Otherwise, the server address is set to - * INADDR_ANY and the default domain name comes from the gethostname(). - * - * An interim version of this code (BIND 4.9, pre-4.4BSD) used 127.0.0.1 - * rather than INADDR_ANY ("0.0.0.0") as the default name server address - * since it was noted that INADDR_ANY actually meant ``the first interface - * you "ifconfig"'d at boot time'' and if this was a SLIP or PPP interface, - * it had to be "up" in order for you to reach your own name server. It - * was later decided that since the recommended practice is to always - * install local static routes through 127.0.0.1 for all your network - * interfaces, that we could solve this problem without a code change. 
+ * the loopback address the default domain name comes from gethostname(). * * The configuration file should always be used, since it is the only way - * to specify a default domain. If you are running a server on your local - * machine, you should say "nameserver 0.0.0.0" or "nameserver 127.0.0.1" - * in the configuration file. + * to specify options and a default domain. If you are running a server + * on your local machine, you should say "nameserver 127.0.0.1" or + * "nameserver ::1" in the configuration file. * * Return 0 if completes successfully, -1 on error */ @@ -146,6 +137,26 @@ res_ninit(res_state statp) { /*% This function has to be reachable by res_data.c but not publicly. */ int __res_vinit(res_state statp, int preinit) { + union res_sockaddr_union u[] = { + { .sin = { + .sin_family = AF_INET, +#ifdef HAVE_SA_LEN + .sin_len = sizeof(struct sockaddr_in), +#endif + .sin_port = htons(NAMESERVER_PORT), + .sin_addr = { htonl(INADDR_LOOPBACK) }, + } }, +#ifdef HAS_INET6_STRUCTS + { .sin6 = { + .sin6_family = AF_INET6, +#ifdef HAVE_SA_LEN + .sin6_len = sizeof(struct sockaddr_in6), +#endif + .sin6_port = htons(NAMESERVER_PORT), + .sin6_addr = IN6ADDR_LOOPBACK_INIT, + } }, +#endif + }; FILE *fp; char *cp, **pp; int n; @@ -158,7 +169,6 @@ __res_vinit(res_state statp, int preinit) { char *net; #endif int dots; - union res_sockaddr_union u[2]; int maxns = MAXNS; RES_SET_H_ERRNO(statp, 0); @@ -173,23 +183,6 @@ __res_vinit(res_state statp, int preinit) { statp->id = res_nrandomid(statp); - memset(u, 0, sizeof(u)); - u[nserv].sin.sin_addr.s_addr = INADDR_ANY; - u[nserv].sin.sin_family = AF_INET; - u[nserv].sin.sin_port = htons(NAMESERVER_PORT); -#ifdef HAVE_SA_LEN - u[nserv].sin.sin_len = sizeof(struct sockaddr_in); -#endif - nserv++; -#ifdef HAS_INET6_STRUCTS - u[nserv].sin6.sin6_addr = in6addr_any; - u[nserv].sin6.sin6_family = AF_INET6; - u[nserv].sin6.sin6_port = htons(NAMESERVER_PORT); -#ifdef HAVE_SA_LEN - u[nserv].sin6.sin6_len = sizeof(struct sockaddr_in6); -#endif - nserv++; -#endif statp->nscount = 0; statp->ndots = 1; statp->pfcode = 0; @@ -224,7 +217,7 @@ __res_vinit(res_state statp, int preinit) { #ifdef RESOLVSORT statp->nsort = 0; #endif - res_setservers(statp, u, nserv); + res_setservers(statp, u, nitems(u)); #ifdef SOLARIS2 /* @@ -288,7 +281,6 @@ __res_vinit(res_state statp, int preinit) { (line[sizeof(name) - 1] == ' ' || \ line[sizeof(name) - 1] == '\t')) - nserv = 0; if ((fp = fopen(_PATH_RESCONF, "re")) != NULL) { struct stat sb; struct timespec now; @@ -507,15 +499,8 @@ __res_vinit(res_state statp, int preinit) { #endif (void) fclose(fp); } -/* - * Last chance to get a nameserver. This should not normally - * be necessary - */ -#ifdef NO_RESOLV_CONF - if(nserv == 0) - nserv = get_nameservers(statp); -#endif + /* guess default domain if not set */ if (statp->defdname[0] == 0 && gethostname(buf, sizeof(statp->defdname) - 1) == 0 && (cp = strchr(buf, '.')) != NULL) diff --git a/lib/libc/riscv/gen/makecontext.c b/lib/libc/riscv/gen/makecontext.c index e5371d082b2c..8a6498167f13 100644 --- a/lib/libc/riscv/gen/makecontext.c +++ b/lib/libc/riscv/gen/makecontext.c @@ -80,7 +80,8 @@ __makecontext(ucontext_t *ucp, void (*func)(void), int argc, ...) va_end(ap); /* Set the stack */ - gp->gp_sp = STACKALIGN(ucp->uc_stack.ss_sp + ucp->uc_stack.ss_size); + gp->gp_sp = STACKALIGN((uintptr_t)ucp->uc_stack.ss_sp + + ucp->uc_stack.ss_size); /* Arrange for return via the trampoline code. 
*/ gp->gp_sepc = (__register_t)_ctx_start; gp->gp_s[0] = (__register_t)func; diff --git a/lib/libc/rpc/rpc.3 b/lib/libc/rpc/rpc.3 index 12d19df117ad..7ae3ec5c5aeb 100644 --- a/lib/libc/rpc/rpc.3 +++ b/lib/libc/rpc/rpc.3 @@ -254,9 +254,9 @@ enum xdr_op { * structure of the data type to be decoded. If this points to 0, * then the type routines should allocate dynamic storage of the * appropriate size and return it. - * bool_t (*xdrproc_t)(XDR *, caddr_t *); + * bool_t (*xdrproc_t)(XDR *, void *); */ -typedef bool_t (*xdrproc_t)(); +typedef bool_t (*xdrproc_t)(XDR *, void *); /* * The XDR handle. diff --git a/lib/libc/rpc/rpc_soc.c b/lib/libc/rpc/rpc_soc.c index c63b89594ce6..24a19624d366 100644 --- a/lib/libc/rpc/rpc_soc.c +++ b/lib/libc/rpc/rpc_soc.c @@ -72,7 +72,8 @@ static CLIENT *clnt_com_create(struct sockaddr_in *, rpcprog_t, rpcvers_t, int *, u_int, u_int, char *); static SVCXPRT *svc_com_create(int, u_int, u_int, char *); -static bool_t rpc_wrap_bcast(char *, struct netbuf *, struct netconfig *); +static bool_t rpc_wrap_bcast(char *, const struct netbuf *, + const struct netconfig *); /* XXX */ #define IN4_LOCALHOST_STRING "127.0.0.1" @@ -307,19 +308,10 @@ registerrpc(int prognum, int versnum, int procnum, } /* - * All the following clnt_broadcast stuff is convulated; it supports - * the earlier calling style of the callback function + * Support the earlier calling style of the callback function with a + * per-thread temporary copy of the real callback. */ -static thread_key_t clnt_broadcast_key; -static resultproc_t clnt_broadcast_result_main; -static once_t clnt_broadcast_once = ONCE_INITIALIZER; - -static void -clnt_broadcast_key_init(void) -{ - - thr_keycreate(&clnt_broadcast_key, free); -} +static _Thread_local clnt_broadcast_resultproc_t clnt_broadcast_result; /* * Need to translate the netbuf address into sockaddr_in address. 
@@ -327,21 +319,16 @@ clnt_broadcast_key_init(void) */ /* ARGSUSED */ static bool_t -rpc_wrap_bcast(char *resultp, struct netbuf *addr, struct netconfig *nconf) +rpc_wrap_bcast(char *resultp, const struct netbuf *addr, + const struct netconfig *nconf) /* * char *resultp; // results of the call * struct netbuf *addr; // address of the guy who responded * struct netconfig *nconf; // Netconf of the transport */ { - resultproc_t clnt_broadcast_result; - if (strcmp(nconf->nc_netid, "udp")) return (FALSE); - if (thr_main()) - clnt_broadcast_result = clnt_broadcast_result_main; - else - clnt_broadcast_result = (resultproc_t)thr_getspecific(clnt_broadcast_key); return (*clnt_broadcast_result)(resultp, (struct sockaddr_in *)addr->buf); } @@ -351,7 +338,8 @@ rpc_wrap_bcast(char *resultp, struct netbuf *addr, struct netconfig *nconf) */ enum clnt_stat clnt_broadcast(u_long prog, u_long vers, u_long proc, xdrproc_t xargs, - void *argsp, xdrproc_t xresults, void *resultsp, resultproc_t eachresult) + void *argsp, xdrproc_t xresults, void *resultsp, + clnt_broadcast_resultproc_t eachresult) /* * u_long prog; // program number * u_long vers; // version number @@ -363,16 +351,16 @@ clnt_broadcast(u_long prog, u_long vers, u_long proc, xdrproc_t xargs, * resultproc_t eachresult; // call with each result obtained */ { + enum clnt_stat ret; - if (thr_main()) - clnt_broadcast_result_main = eachresult; - else { - thr_once(&clnt_broadcast_once, clnt_broadcast_key_init); - thr_setspecific(clnt_broadcast_key, (void *) eachresult); - } - return rpc_broadcast((rpcprog_t)prog, (rpcvers_t)vers, + clnt_broadcast_result = eachresult; + + ret = rpc_broadcast((rpcprog_t)prog, (rpcvers_t)vers, (rpcproc_t)proc, xargs, argsp, xresults, resultsp, - (resultproc_t) rpc_wrap_bcast, "udp"); + rpc_wrap_bcast, "udp"); + + clnt_broadcast_result = NULL; + return (ret); } /* diff --git a/lib/libc/stdio/printf-pos.c b/lib/libc/stdio/printf-pos.c index edbd4e379699..4fa99316321c 100644 --- a/lib/libc/stdio/printf-pos.c +++ b/lib/libc/stdio/printf-pos.c @@ -311,11 +311,9 @@ reswitch: switch (ch) { goto rflag; } goto reswitch; -#ifndef NO_FLOATING_POINT case 'L': flags |= LONGDBL; goto rflag; -#endif case 'h': if (flags & SHORTINT) { flags &= ~SHORTINT; @@ -359,7 +357,6 @@ reswitch: switch (ch) { if ((error = addsarg(&types, flags))) goto error; break; -#ifndef NO_FLOATING_POINT case 'a': case 'A': case 'e': @@ -372,7 +369,6 @@ reswitch: switch (ch) { if (error) goto error; break; -#endif /* !NO_FLOATING_POINT */ case 'n': if (flags & INTMAXT) error = addtype(&types, TP_INTMAXT); @@ -504,11 +500,9 @@ reswitch: switch (ch) { goto rflag; } goto reswitch; -#ifndef NO_FLOATING_POINT case 'L': flags |= LONGDBL; goto rflag; -#endif case 'h': if (flags & SHORTINT) { flags &= ~SHORTINT; @@ -552,7 +546,6 @@ reswitch: switch (ch) { if ((error = addsarg(&types, flags))) goto error; break; -#ifndef NO_FLOATING_POINT case 'a': case 'A': case 'e': @@ -565,7 +558,6 @@ reswitch: switch (ch) { if (error) goto error; break; -#endif /* !NO_FLOATING_POINT */ case 'n': if (flags & INTMAXT) error = addtype(&types, TP_INTMAXT); @@ -744,14 +736,10 @@ build_arg_table(struct typetable *types, va_list ap, union arg **argtable) (*argtable) [n].pintmaxarg = va_arg (ap, intmax_t *); break; case T_DOUBLE: -#ifndef NO_FLOATING_POINT (*argtable) [n].doublearg = va_arg (ap, double); -#endif break; case T_LONG_DOUBLE: -#ifndef NO_FLOATING_POINT (*argtable) [n].longdoublearg = va_arg (ap, long double); -#endif break; case TP_CHAR: (*argtable) [n].pchararg = va_arg (ap, char 
*); diff --git a/lib/libc/stdio/printfcommon.h b/lib/libc/stdio/printfcommon.h index 411b778dc234..9de2783c4804 100644 --- a/lib/libc/stdio/printfcommon.h +++ b/lib/libc/stdio/printfcommon.h @@ -43,7 +43,6 @@ */ -#ifndef NO_FLOATING_POINT #define dtoa __dtoa #define freedtoa __freedtoa @@ -57,7 +56,6 @@ static int exponent(CHAR *, int, CHAR); -#endif /* !NO_FLOATING_POINT */ static CHAR *__ujtoa(uintmax_t, CHAR *, int, int, const char *); static CHAR *__ultoa(u_long, CHAR *, int, int, const char *); @@ -280,7 +278,6 @@ __ujtoa(uintmax_t val, CHAR *endp, int base, int octzero, const char *xdigs) return (cp); } -#ifndef NO_FLOATING_POINT static int exponent(CHAR *p0, int exp, CHAR fmtch) @@ -318,4 +315,3 @@ exponent(CHAR *p0, int exp, CHAR fmtch) return (p - p0); } -#endif /* !NO_FLOATING_POINT */ diff --git a/lib/libc/stdio/printflocal.h b/lib/libc/stdio/printflocal.h index f3d0d3e9e216..f3bedcda6678 100644 --- a/lib/libc/stdio/printflocal.h +++ b/lib/libc/stdio/printflocal.h @@ -82,10 +82,8 @@ union arg { ptrdiff_t *pptrdiffarg; ssize_t *pssizearg; intmax_t *pintmaxarg; -#ifndef NO_FLOATING_POINT double doublearg; long double longdoublearg; -#endif wint_t wintarg; wchar_t *pwchararg; }; diff --git a/lib/libc/stdio/vfprintf.c b/lib/libc/stdio/vfprintf.c index 2dc8d9022179..a4633ce7837f 100644 --- a/lib/libc/stdio/vfprintf.c +++ b/lib/libc/stdio/vfprintf.c @@ -314,7 +314,6 @@ __vfprintf(FILE *fp, locale_t locale, int serrno, const char *fmt0, va_list ap) char sign; /* sign prefix (' ', '+', '-', or \0) */ struct grouping_state gs; /* thousands' grouping info */ -#ifndef NO_FLOATING_POINT /* * We can decompose the printed representation of floating * point numbers into several parts, some of which may be empty: @@ -343,7 +342,6 @@ __vfprintf(FILE *fp, locale_t locale, int serrno, const char *fmt0, va_list ap) int ndig; /* actual number of digits returned by dtoa */ char expstr[MAXEXPDIG+2]; /* buffer for exponent string: e+ZZZ */ char *dtoaresult; /* buffer allocated by dtoa */ -#endif u_long ulval; /* integer arguments %[diouxX] */ uintmax_t ujval; /* %j, %ll, %q, %t, %z integers */ int base; /* base for [diouxX] conversion */ @@ -465,12 +463,10 @@ __vfprintf(FILE *fp, locale_t locale, int serrno, const char *fmt0, va_list ap) va_copy(orgap, ap); io_init(&io, fp); ret = 0; -#ifndef NO_FLOATING_POINT dtoaresult = NULL; decimal_point = localeconv_l(locale)->decimal_point; /* The overwhelmingly common case is decpt_len == 1. */ decpt_len = (decimal_point[1] == '\0' ? 1 : strlen(decimal_point)); -#endif /* * Scan the format for conversions (`%' character). 
@@ -574,11 +570,9 @@ reswitch: switch (ch) { } width = n; goto reswitch; -#ifndef NO_FLOATING_POINT case 'L': flags |= LONGDBL; goto rflag; -#endif case 'h': if (flags & SHORTINT) { flags &= ~SHORTINT; @@ -704,7 +698,6 @@ reswitch: switch (ch) { } base = 10; goto number; -#ifndef NO_FLOATING_POINT case 'a': case 'A': if (ch == 'a') { @@ -823,7 +816,6 @@ fp_common: size += grouping_init(&gs, expt, locale); } break; -#endif /* !NO_FLOATING_POINT */ case 'm': error = __strerror_rl(serrno, errnomsg, sizeof(errnomsg), locale); @@ -1023,9 +1015,7 @@ invalid: PAD(width - realsz, zeroes); /* the string or number proper */ -#ifndef NO_FLOATING_POINT if ((flags & FPT) == 0) { -#endif /* leading zeroes from decimal precision */ PAD(dprec - size, zeroes); if (gs.grouping) { @@ -1034,7 +1024,6 @@ invalid: } else { PRINT(cp, size); } -#ifndef NO_FLOATING_POINT } else { /* glue together f_p fragments */ if (!expchar) { /* %[fF] or sufficiently short %[gG] */ if (expt <= 0) { @@ -1071,7 +1060,6 @@ invalid: PRINT(expstr, expsize); } } -#endif /* left-adjusting padding (always blank) */ if (flags & LADJUST) PAD(width - realsz, blanks); @@ -1085,10 +1073,8 @@ done: FLUSH(); error: va_end(orgap); -#ifndef NO_FLOATING_POINT if (dtoaresult != NULL) freedtoa(dtoaresult); -#endif if (convbuf != NULL) free(convbuf); if (__sferror(fp)) diff --git a/lib/libc/stdio/vfscanf.c b/lib/libc/stdio/vfscanf.c index 89e9e843969f..576fe7d80485 100644 --- a/lib/libc/stdio/vfscanf.c +++ b/lib/libc/stdio/vfscanf.c @@ -56,9 +56,7 @@ #include "local.h" #include "xlocale_private.h" -#ifndef NO_FLOATING_POINT #include <locale.h> -#endif #define BUF 513 /* Maximum length of numeric string. */ @@ -89,9 +87,7 @@ #define CT_FLOAT 4 /* %[efgEFG] conversion */ static const u_char *__sccl(char *, const u_char *); -#ifndef NO_FLOATING_POINT static int parsefloat(FILE *, char *, char *, locale_t); -#endif __weak_reference(__vfscanf, vfscanf); @@ -648,12 +644,10 @@ literal: base = 16; break; -#ifndef NO_FLOATING_POINT case 'A': case 'E': case 'F': case 'G': case 'a': case 'e': case 'f': case 'g': c = CT_FLOAT; break; -#endif case 'S': flags |= LONG; @@ -835,7 +829,6 @@ literal: } break; -#ifndef NO_FLOATING_POINT case CT_FLOAT: /* scan a floating point number as if by strtod */ if (width == 0 || width > sizeof(buf) - 1) @@ -858,7 +851,6 @@ literal: } } break; -#endif /* !NO_FLOATING_POINT */ } if (!(flags & SUPPRESS)) nassigned++; @@ -984,7 +976,6 @@ doswitch: /* NOTREACHED */ } -#ifndef NO_FLOATING_POINT static int parsefloat(FILE *fp, char *buf, char *end, locale_t locale) { @@ -1153,4 +1144,3 @@ parsedone: *++commit = '\0'; return (commit - buf); } -#endif diff --git a/lib/libc/stdio/vfwprintf.c b/lib/libc/stdio/vfwprintf.c index 0d77bd74567e..f5324915ec3e 100644 --- a/lib/libc/stdio/vfwprintf.c +++ b/lib/libc/stdio/vfwprintf.c @@ -389,7 +389,6 @@ __vfwprintf(FILE *fp, locale_t locale, const wchar_t *fmt0, va_list ap) int prec; /* precision from format; <0 for N/A */ wchar_t sign; /* sign prefix (' ', '+', '-', or \0) */ struct grouping_state gs; /* thousands' grouping info */ -#ifndef NO_FLOATING_POINT /* * We can decompose the printed representation of floating * point numbers into several parts, some of which may be empty: @@ -417,7 +416,6 @@ __vfwprintf(FILE *fp, locale_t locale, const wchar_t *fmt0, va_list ap) int ndig; /* actual number of digits returned by dtoa */ wchar_t expstr[MAXEXPDIG+2]; /* buffer for exponent string: e+ZZZ */ char *dtoaresult; /* buffer allocated by dtoa */ -#endif u_long ulval; /* integer arguments %[diouxX] 
*/ uintmax_t ujval; /* %j, %ll, %q, %t, %z integers */ int base; /* base for [diouxX] conversion */ @@ -537,9 +535,7 @@ __vfwprintf(FILE *fp, locale_t locale, const wchar_t *fmt0, va_list ap) va_copy(orgap, ap); io_init(&io, fp); ret = 0; -#ifndef NO_FLOATING_POINT decimal_point = get_decpt(locale); -#endif /* * Scan the format for conversions (`%' character). @@ -643,11 +639,9 @@ reswitch: switch (ch) { } width = n; goto reswitch; -#ifndef NO_FLOATING_POINT case 'L': flags |= LONGDBL; goto rflag; -#endif case 'h': if (flags & SHORTINT) { flags &= ~SHORTINT; @@ -761,7 +755,6 @@ reswitch: switch (ch) { } base = 10; goto number; -#ifndef NO_FLOATING_POINT case 'a': case 'A': if (ch == 'a') { @@ -885,7 +878,6 @@ fp_common: size += grouping_init(&gs, expt, locale); } break; -#endif /* !NO_FLOATING_POINT */ case 'n': /* * Assignment-like behavior is specified if the @@ -1080,9 +1072,7 @@ invalid: PAD(width - realsz, zeroes); /* the string or number proper */ -#ifndef NO_FLOATING_POINT if ((flags & FPT) == 0) { -#endif /* leading zeroes from decimal precision */ PAD(dprec - size, zeroes); if (gs.grouping) { @@ -1091,7 +1081,6 @@ invalid: } else { PRINT(cp, size); } -#ifndef NO_FLOATING_POINT } else { /* glue together f_p fragments */ if (!expchar) { /* %[fF] or sufficiently short %[gG] */ if (expt <= 0) { @@ -1129,7 +1118,6 @@ invalid: PRINT(expstr, expsize); } } -#endif /* left-adjusting padding (always blank) */ if (flags & LADJUST) PAD(width - realsz, blanks); diff --git a/lib/libc/stdio/vfwscanf.c b/lib/libc/stdio/vfwscanf.c index 7ca64eb37811..dbe7c6c48c98 100644 --- a/lib/libc/stdio/vfwscanf.c +++ b/lib/libc/stdio/vfwscanf.c @@ -84,9 +84,7 @@ #define CT_INT 3 /* %[dioupxX] conversion */ #define CT_FLOAT 4 /* %[efgEFG] conversion */ -#ifndef NO_FLOATING_POINT static int parsefloat(FILE *, wchar_t *, wchar_t *, locale_t); -#endif struct ccl { const wchar_t *start; /* character class start */ @@ -630,12 +628,10 @@ literal: base = 16; break; -#ifndef NO_FLOATING_POINT case 'A': case 'E': case 'F': case 'G': case 'a': case 'e': case 'f': case 'g': c = CT_FLOAT; break; -#endif case 'S': flags |= LONG; @@ -813,7 +809,6 @@ literal: } break; -#ifndef NO_FLOATING_POINT case CT_FLOAT: /* scan a floating point number as if by strtod */ if (width == 0 || width > sizeof(buf) / @@ -835,7 +830,6 @@ literal: } } break; -#endif /* !NO_FLOATING_POINT */ } if (!(flags & SUPPRESS)) nassigned++; @@ -848,7 +842,6 @@ match_failure: return (nassigned); } -#ifndef NO_FLOATING_POINT static int parsefloat(FILE *fp, wchar_t *buf, wchar_t *end, locale_t locale) { @@ -1007,4 +1000,3 @@ parsedone: *++commit = '\0'; return (commit - buf); } -#endif diff --git a/lib/libc/stdio/xprintf.c b/lib/libc/stdio/xprintf.c index a5671db67f58..4af0425792b0 100644 --- a/lib/libc/stdio/xprintf.c +++ b/lib/libc/stdio/xprintf.c @@ -60,10 +60,8 @@ union arg { int intarg; long longarg; intmax_t intmaxarg; -#ifndef NO_FLOATING_POINT double doublearg; long double longdoublearg; -#endif wint_t wintarg; char *pchararg; wchar_t *pwchararg; @@ -497,14 +495,10 @@ __v2printf(FILE *fp, const char *fmt0, unsigned pct, va_list ap) args[ch].pwchararg = va_arg (ap, wchar_t *); break; case PA_DOUBLE: -#ifndef NO_FLOATING_POINT args[ch].doublearg = va_arg (ap, double); -#endif break; case PA_DOUBLE | PA_FLAG_LONG_DOUBLE: -#ifndef NO_FLOATING_POINT args[ch].longdoublearg = va_arg (ap, long double); -#endif break; default: errx(1, "argtype = %x (fmt = \"%s\")\n", diff --git a/lib/libc/stdlib/Makefile.inc b/lib/libc/stdlib/Makefile.inc index 
c311ba3d2bcc..c2107fdaeaae 100644 --- a/lib/libc/stdlib/Makefile.inc +++ b/lib/libc/stdlib/Makefile.inc @@ -142,6 +142,7 @@ MLINKS+=strtod.3 strtof.3 \ MLINKS+=strtol.3 strtoll.3 \ strtol.3 strtoq.3 \ strtol.3 strtoimax.3 +MLINKS+=strtonum.3 strtonumx.3 MLINKS+=strtoul.3 strtoull.3 \ strtoul.3 strtouq.3 \ strtoul.3 strtoumax.3 diff --git a/lib/libc/stdlib/Symbol.map b/lib/libc/stdlib/Symbol.map index 03a6d0b543ac..373006b4a388 100644 --- a/lib/libc/stdlib/Symbol.map +++ b/lib/libc/stdlib/Symbol.map @@ -134,6 +134,7 @@ FBSD_1.8 { FBSD_1.9 { memalignment; recallocarray; + strtonumx; tdestroy; }; diff --git a/lib/libc/stdlib/div.3 b/lib/libc/stdlib/div.3 index 55c1bd107cb7..87b9665684fb 100644 --- a/lib/libc/stdlib/div.3 +++ b/lib/libc/stdlib/div.3 @@ -27,7 +27,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd November 14, 2001 +.Dd February 12, 2026 .Dt DIV 3 .Os .Sh NAME @@ -38,21 +38,23 @@ .Sh SYNOPSIS .In stdlib.h .Ft div_t -.Fn div "int num" "int denom" +.Fn div "int numer" "int denom" .Sh DESCRIPTION The .Fn div function computes the value -.Fa num/denom -and returns the quotient and remainder in a structure named +.Fa numer Ns / Ns Fa denom +(numerator/denominator). +It returns a structure named .Fa div_t that contains two .Vt int members named .Va quot -and -.Va rem . +(quotient) and +.Va rem +(remainder). .Sh SEE ALSO .Xr imaxdiv 3 , .Xr ldiv 3 , diff --git a/lib/libc/stdlib/div.c b/lib/libc/stdlib/div.c index 351dca870553..cdc6e1922060 100644 --- a/lib/libc/stdlib/div.c +++ b/lib/libc/stdlib/div.c @@ -35,12 +35,12 @@ #include <stdlib.h> /* div_t */ div_t -div(int num, int denom) +div(int numer, int denom) { div_t r; - r.quot = num / denom; - r.rem = num % denom; + r.quot = numer / denom; + r.rem = numer % denom; return (r); } diff --git a/lib/libc/stdlib/getopt.3 b/lib/libc/stdlib/getopt.3 index a5b5bff9d1a7..1b40f6dfea7e 100644 --- a/lib/libc/stdlib/getopt.3 +++ b/lib/libc/stdlib/getopt.3 @@ -27,7 +27,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd June 5, 2014 +.Dd December 14, 2025 .Dt GETOPT 3 .Os .Sh NAME @@ -60,30 +60,49 @@ if it has been specified in the string of accepted option characters, The option string .Fa optstring may contain the following elements: individual characters, and -characters followed by a colon to indicate an option argument -is to follow. -If an individual character is followed by two colons, then the -option argument is optional; +characters followed by a colon +.Pq Ql \&: +to indicate an option argument is to follow. +If an individual character is followed by two colons +.Pq Ql \&:\&: , +then the option argument is optional; .Va optarg is set to the rest of the current -.Va argv +.Fa argv word, or .Dv NULL if there were no more characters in the current word. -This is a -.Tn GNU -extension. +This is an extension not covered by POSIX. +.Pp For example, an option string .Li \&"x" recognizes an option -.Dq Fl x , -and an option string +.Dq Fl x . +.Pp +An option string .Li \&"x:" -recognizes an option and argument -.Dq Fl x Ar argument . +recognizes an option with an argument, both +.Dq Fl x Ns Ar arg\^ , +and +.Dq Fl x Ar arg\^ . It does not matter to .Fn getopt -if a following argument has leading white space. +if the option's argument is a separate word or not. +.Pp +An option string +.Li \&"x::" +recognizes the option both without an argument +.Dq Fl x , +and with an argument +.Dq Fl x Ns Ar arg\^ . 
+In the latter case the argument must be part of the same +.Fa argv +word. +The +.Dq Fl x +and +.Dq Ar arg\^ +must not be separated by a whitespace on the command line. .Pp On return from .Fn getopt , @@ -267,7 +286,7 @@ Care should be taken not to use as the first character in .Fa optstring to avoid a semantic conflict with -.Tn GNU +GNU .Fn getopt , which assigns different meaning to an .Fa optstring diff --git a/lib/libc/stdlib/imaxdiv.3 b/lib/libc/stdlib/imaxdiv.3 index 1553a81edae2..9e51c47b53c3 100644 --- a/lib/libc/stdlib/imaxdiv.3 +++ b/lib/libc/stdlib/imaxdiv.3 @@ -22,7 +22,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd November 14, 2001 +.Dd February 12, 2026 .Dt IMAXDIV 3 .Os .Sh NAME @@ -39,9 +39,9 @@ The .Fn imaxdiv function computes the value of .Fa numer -divided by +(numerator) divided by .Fa denom -and returns the stored result in the form of the +(denominator) and returns the stored result in the form of the .Vt imaxdiv_t type. .Pp diff --git a/lib/libc/stdlib/imaxdiv.c b/lib/libc/stdlib/imaxdiv.c index bf9737a3c47a..a8b4df2f3904 100644 --- a/lib/libc/stdlib/imaxdiv.c +++ b/lib/libc/stdlib/imaxdiv.c @@ -28,7 +28,6 @@ #include <inttypes.h> -/* See comments in div.c for implementation details. */ imaxdiv_t imaxdiv(intmax_t numer, intmax_t denom) { diff --git a/lib/libc/stdlib/ldiv.3 b/lib/libc/stdlib/ldiv.3 index 66abb00d4d6c..c2ab444bdcab 100644 --- a/lib/libc/stdlib/ldiv.3 +++ b/lib/libc/stdlib/ldiv.3 @@ -29,7 +29,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd April 3, 2022 +.Dd February 12, 2026 .Dt LDIV 3 .Os .Sh NAME @@ -40,21 +40,23 @@ .Sh SYNOPSIS .In stdlib.h .Ft ldiv_t -.Fn ldiv "long num" "long denom" +.Fn ldiv "long numer" "long denom" .Sh DESCRIPTION The .Fn ldiv function computes the value -.Fa num Ns / Ns Fa denom -and returns the quotient and remainder in a structure named +.Fa numer Ns / Ns Fa denom +(numerator/denominator). +It returns the quotient and remainder in a structure named .Vt ldiv_t that contains two .Vt long members named .Va quot -and -.Va rem . +(quotient) and +.Va rem +(remainder). .Sh SEE ALSO .Xr div 3 , .Xr imaxdiv 3 , diff --git a/lib/libc/stdlib/ldiv.c b/lib/libc/stdlib/ldiv.c index 4c73bcc14af4..4e92c56dd3e2 100644 --- a/lib/libc/stdlib/ldiv.c +++ b/lib/libc/stdlib/ldiv.c @@ -35,14 +35,12 @@ #include <stdlib.h> /* ldiv_t */ ldiv_t -ldiv(long num, long denom) +ldiv(long numer, long denom) { ldiv_t r; - /* see div.c for comments */ - - r.quot = num / denom; - r.rem = num % denom; + r.quot = numer / denom; + r.rem = numer % denom; return (r); } diff --git a/lib/libc/stdlib/lldiv.3 b/lib/libc/stdlib/lldiv.3 index d1de4e9234e3..783ea3df6554 100644 --- a/lib/libc/stdlib/lldiv.3 +++ b/lib/libc/stdlib/lldiv.3 @@ -22,7 +22,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd November 14, 2001 +.Dd February 12, 2026 .Dt LLDIV 3 .Os .Sh NAME @@ -39,9 +39,9 @@ The .Fn lldiv function computes the value of .Fa numer -divided by +(numerator) divided by .Fa denom -and returns the stored result in the form of the +(denominator) and returns the stored result in the form of the .Vt lldiv_t type. .Pp diff --git a/lib/libc/stdlib/lldiv.c b/lib/libc/stdlib/lldiv.c index 6feeb74bacd6..cdce0a4f6a27 100644 --- a/lib/libc/stdlib/lldiv.c +++ b/lib/libc/stdlib/lldiv.c @@ -28,7 +28,6 @@ #include <stdlib.h> -/* See comments in div.c for implementation details. 
*/ lldiv_t lldiv(long long numer, long long denom) { diff --git a/lib/libc/stdlib/reallocarray.3 b/lib/libc/stdlib/reallocarray.3 index 9a2ab5c7a840..d72513c2f301 100644 --- a/lib/libc/stdlib/reallocarray.3 +++ b/lib/libc/stdlib/reallocarray.3 @@ -176,4 +176,4 @@ The function first appeared in .Ox 6.1 and -.Fx 16.0 . +.Fx 15.1 . diff --git a/lib/libc/stdlib/strtonum.3 b/lib/libc/stdlib/strtonum.3 index 2650d147e7cc..7e6111a6ff71 100644 --- a/lib/libc/stdlib/strtonum.3 +++ b/lib/libc/stdlib/strtonum.3 @@ -1,4 +1,5 @@ .\" Copyright (c) 2004 Ted Unangst +.\" Copyright 2023 Oxide Computer Company .\" .\" Permission to use, copy, modify, and distribute this software for any .\" purpose with or without fee is hereby granted, provided that the above @@ -14,11 +15,12 @@ .\" .\" $OpenBSD: strtonum.3,v 1.13 2006/04/25 05:15:42 tedu Exp $ .\" -.Dd April 29, 2004 +.Dd January 15, 2026 .Dt STRTONUM 3 .Os .Sh NAME -.Nm strtonum +.Nm strtonum , +.Nm strtonumx .Nd "reliably convert string value to an integer" .Sh SYNOPSIS .In stdlib.h @@ -29,26 +31,33 @@ .Fa "long long maxval" .Fa "const char **errstr" .Fc +.Ft long long +.Fo strtonumx +.Fa "const char *nptr" +.Fa "long long minval" +.Fa "long long maxval" +.Fa "const char **errstr" +.Fa "int base" +.Fc .Sh DESCRIPTION The .Fn strtonum -function converts the string in +and +.Fn strtonumx +functions convert the string in .Fa nptr to a .Vt "long long" value. -The -.Fn strtonum -function was designed to facilitate safe, robust programming -and overcome the shortcomings of the +These functions were designed to facilitate safe, robust programming and +overcome the shortcomings of the .Xr atoi 3 and .Xr strtol 3 family of interfaces. .Pp The string may begin with an arbitrary amount of whitespace -(as determined by -.Xr isspace 3 ) +.Pq as determined by Xr isspace 3 followed by a single optional .Ql + or @@ -57,7 +66,10 @@ sign. .Pp The remainder of the string is converted to a .Vt "long long" -value according to base 10. +value according to base 10 +.Pq for Fn strtonum +or the provided base +.Pq for Fn strtonumx . .Pp The value obtained is then checked against the provided .Fa minval @@ -68,13 +80,30 @@ If .Fa errstr is non-null, .Fn strtonum -stores an error string in +and +.Fn strtonumx +store an error string in .Fa *errstr indicating the failure. +.Pp +For +.Fn strtonumx +the value of +.Ar base +is interpreted in the same way as described in +.Xr strtoll 3 . +In particular, if the value of +.Ar base +is 0, then the expected form of +.Ar nptr +is that of a decimal constant, octal constant or hexadecimal constant, any of +which may be preceded by a + or - sign. .Sh RETURN VALUES The .Fn strtonum -function returns the result of the conversion, +and +.Fn strtonumx +functions return the result of the conversion, unless the value would exceed the provided bounds or is invalid. On error, 0 is returned, .Va errno @@ -90,6 +119,8 @@ a successful return of 0 from an error. .Sh EXAMPLES Using .Fn strtonum +and +.Fn strtonumx correctly is meant to be simpler than the alternative functions. .Bd -literal -offset indent int iterations; @@ -107,7 +138,10 @@ The above example will guarantee that the value of iterations is between .It Bq Er ERANGE The given string was out of range. .It Bq Er EINVAL -The given string did not consist solely of digit characters. +The given string did not consist solely of digit characters +.Pq for Fn strtonum , +or characters which are valid in the given base +.Pq for Fn strtonumx . 
.It Bq Er EINVAL The supplied .Fa minval @@ -120,12 +154,15 @@ If an error occurs, will be set to one of the following strings: .Pp .Bl -tag -width ".Li too large" -compact -.It Li "too large" +.It Qq too large The result was larger than the provided maximum value. -.It Li "too small" +.It Qq too small The result was smaller than the provided minimum value. -.It Li invalid -The string did not consist solely of digit characters. +.It Qq invalid +The string did not consist solely of characters valid in the specified base +.Pq or base 10 for Fn strtonum . +.It Qq unparsable; invalid base specified +The specified base was outside the permitted range. .El .Sh SEE ALSO .Xr atof 3 , @@ -152,3 +189,6 @@ The .Fn strtonum function first appeared in .Ox 3.6 . +The +.Fn strtonumx +function first appeared in illumos in 2023. diff --git a/lib/libc/stdlib/strtonum.c b/lib/libc/stdlib/strtonum.c index 0d0715bf39c1..44c27d6af3ad 100644 --- a/lib/libc/stdlib/strtonum.c +++ b/lib/libc/stdlib/strtonum.c @@ -2,6 +2,8 @@ * Copyright (c) 2004 Ted Unangst and Todd Miller * All rights reserved. * + * Copyright 2023 Oxide Computer Company + * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. @@ -24,10 +26,13 @@ #define INVALID 1 #define TOOSMALL 2 #define TOOLARGE 3 +#define BADBASE 4 + +#define MBASE ('z' - 'a' + 1 + 10) long long -strtonum(const char *numstr, long long minval, long long maxval, - const char **errstrp) +strtonumx(const char *numstr, long long minval, long long maxval, + const char **errstrp, int base) { long long ll = 0; int error = 0; @@ -35,20 +40,23 @@ strtonum(const char *numstr, long long minval, long long maxval, struct errval { const char *errstr; int err; - } ev[4] = { + } ev[5] = { { NULL, 0 }, { "invalid", EINVAL }, { "too small", ERANGE }, { "too large", ERANGE }, + { "unparsable; invalid base specified", EINVAL }, }; ev[0].err = errno; errno = 0; if (minval > maxval) { error = INVALID; + } else if (base < 0 || base > MBASE || base == 1) { + error = BADBASE; } else { - ll = strtoll(numstr, &ep, 10); - if (errno == EINVAL || numstr == ep || *ep != '\0') + ll = strtoll(numstr, &ep, base); + if (numstr == ep || *ep != '\0') error = INVALID; else if ((ll == LLONG_MIN && errno == ERANGE) || ll < minval) error = TOOSMALL; @@ -58,8 +66,15 @@ strtonum(const char *numstr, long long minval, long long maxval, if (errstrp != NULL) *errstrp = ev[error].errstr; errno = ev[error].err; - if (error) + if (error != 0) ll = 0; return (ll); } + +long long +strtonum(const char *numstr, long long minval, long long maxval, + const char **errstrp) +{ + return (strtonumx(numstr, minval, maxval, errstrp, 10)); +} diff --git a/lib/libc/stdlib/tdestroy.c b/lib/libc/stdlib/tdestroy.c index c324e151da11..2aeb02228e46 100644 --- a/lib/libc/stdlib/tdestroy.c +++ b/lib/libc/stdlib/tdestroy.c @@ -16,53 +16,51 @@ nul_node_free(void *node __unused) { } -/* Find the leftmost node. */ -static posix_tnode * -tdestroy_find_leftmost(posix_tnode *tn) -{ - while (tn->llink != NULL) - tn = tn->llink; - return (tn); -} - -/* - * This algorithm for non-recursive non-allocating destruction of the tree - * is described in - * https://codegolf.stackexchange.com/questions/478/free-a-binary-tree/489#489P - * and in https://devblogs.microsoft.com/oldnewthing/20251107-00/?p=111774. 
- */ void tdestroy(void *rootp, void (*node_free)(void *)) { - posix_tnode *tn, *tn_leftmost, *xtn; + posix_tnode *back, *curr, **front; - tn = rootp; - if (tn == NULL) + if (rootp == NULL) return; if (node_free == NULL) node_free = nul_node_free; - tn_leftmost = tn; - while (tn != NULL) { + back = rootp; + front = &back; + + for (;;) { /* - * Make the right subtree the left subtree of the - * leftmost node, and recalculate the leftmost. + * The sequence of nodes from back to just before *front linked + * by llink have been found to have non-NULL rlink. + * + * Extend *front to (*front)->llink, deleting *front from the + * sequence if it has a NULL rlink. */ - tn_leftmost = tdestroy_find_leftmost(tn_leftmost); - if (tn->rlink != NULL) { - tn_leftmost->llink = tn->rlink; - tn_leftmost = tn_leftmost->llink; + curr = *front; + if (curr->rlink != NULL) + front = &curr->llink; + else { + *front = curr->llink; + node_free(curr->key); + free(curr); } + if (*front != NULL) + continue; + if (back == NULL) + break; /* - * At this point, all children of tn have been - * arranged to be reachable via tn->left. We can - * safely delete the current node and advance to its - * left child as the new root. + * The sequence cannot be extended because *front is NULL. Make + * the rlink of the back node the new *front, the llink of the + * back node the new back, and free the old back node. */ - xtn = tn->llink; - node_free(tn->key); - free(tn); - tn = xtn; + curr = back; + back = curr->llink; + if (back == NULL) + front = &back; + *front = curr->rlink; + node_free(curr->key); + free(curr); } } diff --git a/lib/libc/string/bcmp.3 b/lib/libc/string/bcmp.3 index 954e10bfdeab..ec3caa0f1c9f 100644 --- a/lib/libc/string/bcmp.3 +++ b/lib/libc/string/bcmp.3 @@ -27,7 +27,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd August 15, 2016 +.Dd October 8, 2025 .Dt BCMP 3 .Os .Sh NAME @@ -73,3 +73,11 @@ before it was moved to for .St -p1003.1-2001 compliance. +.Pp +.St -p1003.1-2008 +removes the specification of +.Fn bcmp +and it is marked as LEGACY in +.St -p1003.1-2004 . +For portability with other systems new programs should use +.Xr memcmp 3 . diff --git a/lib/libc/string/swab.c b/lib/libc/string/swab.c index ed4436a49810..4f4fb26379c6 100644 --- a/lib/libc/string/swab.c +++ b/lib/libc/string/swab.c @@ -3,14 +3,16 @@ * Copyright (c) 2024 rilysh <nightquick@proton.me> */ +#include <string.h> #include <unistd.h> #include <sys/endian.h> void swab(const void * __restrict from, void * __restrict to, ssize_t len) { - const uint16_t *f __aligned(1) = from; - uint16_t *t __aligned(1) = to; + const char *f = from; + char *t = to; + uint16_t tmp; /* * POSIX says overlapping copy behavior is undefined, however many @@ -19,7 +21,12 @@ swab(const void * __restrict from, void * __restrict to, ssize_t len) * and swapping them before writing them back accomplishes this. */ while (len > 1) { - *t++ = bswap16(*f++); + memcpy(&tmp, f, 2); + tmp = bswap16(tmp); + memcpy(t, &tmp, 2); + + f += 2; + t += 2; len -= 2; } } diff --git a/lib/libc/sys/Symbol.map b/lib/libc/sys/Symbol.map index 32b1b0ecee05..8acffcfd714e 100644 --- a/lib/libc/sys/Symbol.map +++ b/lib/libc/sys/Symbol.map @@ -71,3 +71,7 @@ FBSD_1.6 { FBSD_1.7 { _Fork; }; + +FBSD_1.9 { + pdwait; +}; diff --git a/lib/libc/sys/pdwait.c b/lib/libc/sys/pdwait.c new file mode 100644 index 000000000000..89d43b7fca2e --- /dev/null +++ b/lib/libc/sys/pdwait.c @@ -0,0 +1,20 @@ +/* + * Copyright 2026 The FreeBSD Foundation. 
+ * + * SPDX-License-Identifier: BSD-2-Clause + * + * This software was developed by Konstantin Belousov <kib@FreeBSD.org> + * under sponsorship from the FreeBSD Foundation. + */ + +#include <sys/types.h> +#include <sys/procdesc.h> +#include "libc_private.h" + +#pragma weak pdwait +int +pdwait(int fd, int *status, int options, struct __wrusage *ru, + struct __siginfo *infop) +{ + return (INTERPOS_SYS(pdwait, fd, status, options, ru, infop)); +} diff --git a/lib/libc/tests/gen/dir2_test.c b/lib/libc/tests/gen/dir2_test.c index 4ec5a1759d06..dac8ccbe472a 100644 --- a/lib/libc/tests/gen/dir2_test.c +++ b/lib/libc/tests/gen/dir2_test.c @@ -41,7 +41,6 @@ ATF_TC(telldir_after_seekdir); ATF_TC_HEAD(telldir_after_seekdir, tc) { - atf_tc_set_md_var(tc, "descr", "Calling telldir(3) after seekdir(3) " "should return the argument passed to seekdir."); } @@ -50,7 +49,7 @@ ATF_TC_BODY(telldir_after_seekdir, tc) const int NUMFILES = 1000; char template[] = "dXXXXXX"; char *tmpdir; - int i, dirfd; + int i, dd; DIR *dirp; struct dirent *de; long beginning, middle, end, td; @@ -58,8 +57,8 @@ ATF_TC_BODY(telldir_after_seekdir, tc) /* Create a temporary directory */ tmpdir = mkdtemp(template); ATF_REQUIRE_MSG(tmpdir != NULL, "mkdtemp failed"); - dirfd = open(tmpdir, O_RDONLY | O_DIRECTORY); - ATF_REQUIRE(dirfd > 0); + dd = open(tmpdir, O_RDONLY | O_DIRECTORY); + ATF_REQUIRE(dd > 0); /* * Fill it with files. Must be > 128 to ensure that the directory @@ -70,14 +69,14 @@ ATF_TC_BODY(telldir_after_seekdir, tc) char filename[16]; snprintf(filename, sizeof(filename), "%d", i); - fd = openat(dirfd, filename, O_WRONLY | O_CREAT, 0600); + fd = openat(dd, filename, O_WRONLY | O_CREAT, 0600); ATF_REQUIRE(fd > 0); close(fd); } /* Get some directory bookmarks in various locations */ - dirp = fdopendir(dirfd); - ATF_REQUIRE_MSG(dirfd >= 0, "fdopendir failed"); + dirp = fdopendir(dd); + ATF_REQUIRE_MSG(dd >= 0, "fdopendir failed"); beginning = telldir(dirp); for (i = 0; i < NUMFILES / 2; i = i+1) { de = readdir(dirp); @@ -126,7 +125,7 @@ ATF_TC_BODY(telldir_at_end_of_block, tc) const int NUMFILES = 129; char template[] = "dXXXXXX"; char *tmpdir; - int i, dirfd; + int i, dd; DIR *dirp; struct dirent *de; long td; @@ -135,8 +134,8 @@ ATF_TC_BODY(telldir_at_end_of_block, tc) /* Create a temporary directory */ tmpdir = mkdtemp(template); ATF_REQUIRE_MSG(tmpdir != NULL, "mkdtemp failed"); - dirfd = open(tmpdir, O_RDONLY | O_DIRECTORY); - ATF_REQUIRE(dirfd > 0); + dd = open(tmpdir, O_RDONLY | O_DIRECTORY); + ATF_REQUIRE(dd > 0); /* * Fill it with files. 
Must be > 128 to ensure that the directory @@ -147,14 +146,14 @@ ATF_TC_BODY(telldir_at_end_of_block, tc) char filename[16]; snprintf(filename, sizeof(filename), "%d", i); - fd = openat(dirfd, filename, O_WRONLY | O_CREAT, 0600); + fd = openat(dd, filename, O_WRONLY | O_CREAT, 0600); ATF_REQUIRE(fd > 0); close(fd); } /* Read all entries within the first page */ - dirp = fdopendir(dirfd); - ATF_REQUIRE_MSG(dirfd >= 0, "fdopendir failed"); + dirp = fdopendir(dd); + ATF_REQUIRE_MSG(dd >= 0, "fdopendir failed"); for (i = 0; i < NUMFILES - 1; i = i + 1) ATF_REQUIRE_MSG(readdir(dirp) != NULL, "readdir failed"); @@ -178,9 +177,8 @@ ATF_TC_BODY(telldir_at_end_of_block, tc) ATF_TP_ADD_TCS(tp) { - ATF_TP_ADD_TC(tp, telldir_after_seekdir); ATF_TP_ADD_TC(tp, telldir_at_end_of_block); - return atf_no_error(); + return (atf_no_error()); } diff --git a/lib/libc/xdr/xdr.c b/lib/libc/xdr/xdr.c index 59a843405abf..47aafea4bc30 100644 --- a/lib/libc/xdr/xdr.c +++ b/lib/libc/xdr/xdr.c @@ -696,6 +696,13 @@ xdr_string(XDR *xdrs, char **cpp, u_int maxsize) if (sp == NULL) { return(TRUE); /* already free */ } + /* + * XXX: buggy software may call this without a third + * argument via xdr_free(). Ignore maxsize since it may + * be invalid. Otherwise, if it's very small, we might + * fail to free the string. + */ + maxsize = RPC_MAXDATASIZE; /* FALLTHROUGH */ case XDR_ENCODE: size = strlen(sp); diff --git a/lib/libdiff/tests/libdiff_test.c b/lib/libdiff/tests/libdiff_test.c index e82a36f3d38a..2a7c1b37e2c2 100644 --- a/lib/libdiff/tests/libdiff_test.c +++ b/lib/libdiff/tests/libdiff_test.c @@ -45,10 +45,9 @@ ATF_TC_BODY(diff_atomize_truncated, tc) rewind(f); ATF_REQUIRE(truncate(fn, size / 2) == 0); ATF_REQUIRE((p = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fileno(f), 0)) != MAP_FAILED); - ATF_REQUIRE(diff_atomize_file(&d, &cfg, f, p, size, 0) == 0); + ATF_REQUIRE(diff_atomize_file(&d, &cfg, f, p, size, 0) == EIO); ATF_REQUIRE((size_t)d.len <= size / 2); ATF_REQUIRE((size_t)d.len >= size / 2 - sizeof(line)); - ATF_REQUIRE(d.atomizer_flags & DIFF_ATOMIZER_FILE_TRUNCATED); } ATF_TC_CLEANUP(diff_atomize_truncated, tc) { diff --git a/lib/libexecinfo/tests/sigtramp_test.c b/lib/libexecinfo/tests/sigtramp_test.c index a4943d8d7f4f..5dead9a4146b 100644 --- a/lib/libexecinfo/tests/sigtramp_test.c +++ b/lib/libexecinfo/tests/sigtramp_test.c @@ -45,6 +45,14 @@ handler(int signum __unused) ATF_TC_WITHOUT_HEAD(test_backtrace_sigtramp); ATF_TC_BODY(test_backtrace_sigtramp, tc) { +#if defined(__aarch64__) + /* + * https://reviews.llvm.org is deprecated and + * this review is never going to be updated or completed + */ + atf_tc_expect_fail("https://reviews.llvm.org/D155066"); +#endif + struct sigaction act; pid_t child; int status; diff --git a/lib/libfetch/common.c b/lib/libfetch/common.c index 2e1364edff04..75cb4320ad99 100644 --- a/lib/libfetch/common.c +++ b/lib/libfetch/common.c @@ -286,6 +286,9 @@ fetch_reopen(int sd) flags = fcntl(sd, F_GETFD); if (flags != -1 && (flags & FD_CLOEXEC) == 0) (void)fcntl(sd, F_SETFD, flags | FD_CLOEXEC); + flags = fcntl(sd, F_GETFL); + if (flags != -1 && (flags & O_NONBLOCK) == 0) + (void)fcntl(sd, F_SETFL, flags | O_NONBLOCK); (void)setsockopt(sd, SOL_SOCKET, SO_NOSIGPIPE, &opt, sizeof(opt)); conn->sd = sd; ++conn->ref; @@ -1048,6 +1051,8 @@ fetch_ssl_setup_transport_layer(SSL_CTX *ctx, int verbose) ssl_ctx_options |= SSL_OP_NO_TLSv1_1; if (getenv("SSL_NO_TLS1_2") != NULL) ssl_ctx_options |= SSL_OP_NO_TLSv1_2; + if (getenv("SSL_NO_TLS1_3") != NULL) + ssl_ctx_options |= SSL_OP_NO_TLSv1_3; if 
(verbose) fetch_info("SSL options: %lx", ssl_ctx_options); SSL_CTX_set_options(ctx, ssl_ctx_options); @@ -1180,8 +1185,11 @@ fetch_ssl(conn_t *conn, const struct url *URL, int verbose) X509_NAME *name; char *str; - conn->ssl_meth = SSLv23_client_method(); - conn->ssl_ctx = SSL_CTX_new(conn->ssl_meth); + if ((conn->ssl_ctx = SSL_CTX_new(TLS_client_method())) == NULL) { + fprintf(stderr, "SSL context creation failed\n"); + ERR_print_errors_fp(stderr); + return (-1); + } SSL_CTX_set_mode(conn->ssl_ctx, SSL_MODE_AUTO_RETRY); fetch_ssl_setup_transport_layer(conn->ssl_ctx, verbose); @@ -1192,7 +1200,8 @@ fetch_ssl(conn_t *conn, const struct url *URL, int verbose) conn->ssl = SSL_new(conn->ssl_ctx); if (conn->ssl == NULL) { - fprintf(stderr, "SSL context creation failed\n"); + fprintf(stderr, "SSL connection creation failed\n"); + ERR_print_errors_fp(stderr); return (-1); } SSL_set_fd(conn->ssl, conn->sd); diff --git a/lib/libfetch/common.h b/lib/libfetch/common.h index 7396c8a68ab6..06089aae5451 100644 --- a/lib/libfetch/common.h +++ b/lib/libfetch/common.h @@ -56,7 +56,6 @@ struct fetchconn { SSL *ssl; /* SSL handle */ SSL_CTX *ssl_ctx; /* SSL context */ X509 *ssl_cert; /* server certificate */ - const SSL_METHOD *ssl_meth; /* SSL method */ #endif int ref; /* reference count */ }; diff --git a/lib/libfetch/fetch.3 b/lib/libfetch/fetch.3 index 5f7489799cf6..20a22a263b5b 100644 --- a/lib/libfetch/fetch.3 +++ b/lib/libfetch/fetch.3 @@ -24,7 +24,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd October 7, 2023 +.Dd January 22, 2026 .Dt FETCH 3 .Os .Sh NAME @@ -450,9 +450,11 @@ allows TLSv1 and newer when negotiating the connecting with the remote peer. You can change this behavior by setting the .Ev SSL_NO_TLS1 , -.Ev SSL_NO_TLS1_1 and -.Ev SSL_NO_TLS1_2 -environment variables to disable TLS 1.0, 1.1 and 1.2 respectively. +.Ev SSL_NO_TLS1_1 , +.Ev SSL_NO_TLS1_2 and +.Ev SSL_NO_TLS1_3 +environment variables to disable TLS 1.0, 1.1, 1.2 and 1.3 +respectively. .Sh AUTHENTICATION Apart from setting the appropriate environment variables and specifying the user name and password in the URL or the @@ -676,6 +678,8 @@ Do not allow TLS version 1.0 when negotiating the connection. Do not allow TLS version 1.1 when negotiating the connection. .It Ev SSL_NO_TLS1_2 Do not allow TLS version 1.2 when negotiating the connection. +.It Ev SSL_NO_TLS1_3 +Do not allow TLS version 1.3 when negotiating the connection. .It Ev SSL_NO_VERIFY_HOSTNAME If set, do not verify that the hostname matches the subject of the certificate presented by the server. 
diff --git a/lib/libiconv_modules/mapper_std/citrus_mapper_std.c b/lib/libiconv_modules/mapper_std/citrus_mapper_std.c index 5db142fcd36a..4284995e169b 100644 --- a/lib/libiconv_modules/mapper_std/citrus_mapper_std.c +++ b/lib/libiconv_modules/mapper_std/citrus_mapper_std.c @@ -307,7 +307,7 @@ rowcol_init(struct _citrus_mapper_std *ms) rc->rc_dst_ilseq = be32toh(eix->eix_dst_ilseq); } - /* calcurate expected table size */ + /* calculate expected table size */ i = rc->rc_src_rowcol_len; lz = &rc->rc_src_rowcol[--i]; table_size = lz->width; diff --git a/lib/libjail/jail.c b/lib/libjail/jail.c index ad3348af0d2d..baabeb4afed9 100644 --- a/lib/libjail/jail.c +++ b/lib/libjail/jail.c @@ -29,12 +29,14 @@ #include <sys/param.h> #include <sys/jail.h> #include <sys/linker.h> +#include <sys/mac.h> #include <sys/socket.h> #include <sys/sysctl.h> #include <arpa/inet.h> #include <netinet/in.h> +#include <assert.h> #include <errno.h> #include <inttypes.h> #include <stdio.h> @@ -46,12 +48,72 @@ #define SJPARAM "security.jail.param" -#define JPS_IN_ADDR 1 -#define JPS_IN6_ADDR 2 +#define JPSDEF_OF(jp) \ + ((jp)->jp_structtype >= 0 ? &jp_structdefs[(jp)->jp_structtype] : NULL) + +static int jps_get(struct jailparam *, struct iovec *); +static int jps_set(const struct jailparam *, struct iovec *); +static void jps_free(struct jailparam *); + +typedef int (jps_import_t)(const struct jailparam *, int, const char *); +typedef char *(jps_export_t)(const struct jailparam *, int); +typedef int (jps_get_t)(struct jailparam *, struct iovec *); +typedef int (jps_set_t)(const struct jailparam *, struct iovec *); +typedef void (jps_free_t)(struct jailparam *); + +static jps_import_t jps_import_in_addr; +static jps_import_t jps_import_in6_addr; +static jps_import_t jps_import_mac_label; + +static jps_export_t jps_export_in_addr; +static jps_export_t jps_export_in6_addr; +static jps_export_t jps_export_mac_label; + +static jps_get_t jps_get_mac_label; + +static jps_set_t jps_set_mac_label; + +static jps_free_t jps_free_mac_label; + +static const struct jp_structdef { + const char *jps_type; /* sysctl type */ + size_t jps_valuelen; /* value size */ + jps_import_t *jps_import; /* jailparam_import() */ + jps_export_t *jps_export; /* jailparam_export() */ + jps_get_t *jps_get; /* jailparam_get() */ + jps_set_t *jps_set; /* jailparam_set() */ + jps_free_t *jps_free; /* jailparam_free() */ +} jp_structdefs[] = { + { + .jps_type = "S,in_addr", + .jps_valuelen = sizeof(struct in_addr), + .jps_import = jps_import_in_addr, + .jps_export = jps_export_in_addr, + }, + { + .jps_type = "S,in6_addr", + .jps_valuelen = sizeof(struct in6_addr), + .jps_import = jps_import_in6_addr, + .jps_export = jps_export_in6_addr, + }, + { + .jps_type = "S,mac", + .jps_valuelen = sizeof(mac_t *), + .jps_import = jps_import_mac_label, + .jps_export = jps_export_mac_label, + .jps_get = jps_get_mac_label, + .jps_set = jps_set_mac_label, + .jps_free = jps_free_mac_label, + }, +}; + +_Static_assert(nitems(jp_structdefs) <= INT_MAX, + "Too many struct definitions requires an ABI break in struct jailparam"); #define ARRAY_SANITY 5 #define ARRAY_SLOP 5 +static const struct jp_structdef *jp_structinfo(const char *type, int *); static int jailparam_import_enum(const char **values, int nvalues, const char *valstr, size_t valsize, int *value); @@ -66,6 +128,23 @@ char jail_errmsg[JAIL_ERRMSGLEN]; static const char *bool_values[] = { "false", "true" }; static const char *jailsys_values[] = { "disable", "new", "inherit" }; +static const struct jp_structdef * 
+jp_structinfo(const char *type, int *oidx) +{ + const struct jp_structdef *jpsdef; + + for (size_t idx = 0; idx < nitems(jp_structdefs); idx++) { + jpsdef = &jp_structdefs[idx]; + + if (strcmp(jpsdef->jps_type, type) == 0) { + *oidx = (int)idx; + return (jpsdef); + } + } + + *oidx = -1; + return (NULL); +} /* * Import a null-terminated parameter list and set a jail with the flags @@ -325,6 +404,7 @@ jailparam_import(struct jailparam *jp, const char *value) { char *p, *ep, *tvalue; const char *avalue; + const struct jp_structdef *jpsdef; int i, nval, fw; if (value == NULL) @@ -426,34 +506,13 @@ jailparam_import(struct jailparam *jp, const char *value) case CTLTYPE_STRUCT: tvalue = alloca(fw + 1); strlcpy(tvalue, avalue, fw + 1); - switch (jp->jp_structtype) { - case JPS_IN_ADDR: - if (inet_pton(AF_INET, tvalue, - &((struct in_addr *)jp->jp_value)[i]) != 1) - { - snprintf(jail_errmsg, - JAIL_ERRMSGLEN, - "%s: not an IPv4 address: %s", - jp->jp_name, tvalue); - errno = EINVAL; - goto error; - } - break; - case JPS_IN6_ADDR: - if (inet_pton(AF_INET6, tvalue, - &((struct in6_addr *)jp->jp_value)[i]) != 1) - { - snprintf(jail_errmsg, - JAIL_ERRMSGLEN, - "%s: not an IPv6 address: %s", - jp->jp_name, tvalue); - errno = EINVAL; - goto error; - } - break; - default: + + if (jp->jp_structtype == -1) goto unknown_type; - } + + jpsdef = &jp_structdefs[jp->jp_structtype]; + if ((*jpsdef->jps_import)(jp, i, tvalue) != 0) + goto error; break; default: unknown_type: @@ -548,7 +607,7 @@ jailparam_set(struct jailparam *jp, unsigned njp, int flags) /* No value means key removal. */ jiov[i].iov_base = NULL; jiov[i].iov_len = 0; - } else { + } else if (jps_set(&jp[j], &jiov[i]) != 0) { /* * Try to fill in missing values with an empty string. */ @@ -593,7 +652,7 @@ jailparam_get(struct jailparam *jp, unsigned njp, int flags) { struct iovec *jiov; struct jailparam *jp_desc, *jp_lastjid, *jp_jid, *jp_name, *jp_key; - int i, ai, ki, jid, arrays, sanity; + int i, ai, ki, jid, arrays, processed, sanity; unsigned j; /* @@ -695,6 +754,26 @@ jailparam_get(struct jailparam *jp, unsigned njp, int flags) JAIL_ERRMSGLEN); return (-1); } + + /* + * Returns -1 on error, or # index populated on + * success. 0 is perfectly valid for a type + * that may want to simply initialize the value + * as needed. + */ + processed = jps_get(&jp[j], &jiov[i]); + if (processed == -1) { + return (-1); + } else if (processed > 0) { + /* + * The above math for jiov sizing does + * not really account for one param + * expanding to multiple entries. 
+ */ + assert(processed == 1); + i += processed; + continue; + } } jiov[i].iov_base = jp[j].jp_value; jiov[i].iov_len = jp[j].jp_valuelen; @@ -773,6 +852,7 @@ jailparam_export(struct jailparam *jp) { size_t *valuelens; char *value, *tvalue, **values; + const struct jp_structdef *jpsdef; size_t valuelen; int i, nval, ival; char valbuf[INET6_ADDRSTRLEN]; @@ -839,29 +919,25 @@ jailparam_export(struct jailparam *jp) (uintmax_t)((uint64_t *)jp->jp_value)[i]); break; case CTLTYPE_STRUCT: - switch (jp->jp_structtype) { - case JPS_IN_ADDR: - if (inet_ntop(AF_INET, - &((struct in_addr *)jp->jp_value)[i], - valbuf, sizeof(valbuf)) == NULL) { - strerror_r(errno, jail_errmsg, - JAIL_ERRMSGLEN); - return (NULL); - } - break; - case JPS_IN6_ADDR: - if (inet_ntop(AF_INET6, - &((struct in6_addr *)jp->jp_value)[i], - valbuf, sizeof(valbuf)) == NULL) { - strerror_r(errno, jail_errmsg, - JAIL_ERRMSGLEN); - return (NULL); - } - break; - default: + if (jp->jp_structtype == -1) goto unknown_type; + + jpsdef = &jp_structdefs[jp->jp_structtype]; + value = (*jpsdef->jps_export)(jp, i); + if (value == NULL) { + strerror_r(errno, jail_errmsg, + JAIL_ERRMSGLEN); + return (NULL); } - break; + + valuelens[i] = strlen(value) + 1; + valuelen += valuelens[i]; + values[i] = alloca(valuelens[i]); + strcpy(values[i], value); + + free(value); + value = NULL; + continue; /* Value already added to values[] */ default: unknown_type: snprintf(jail_errmsg, JAIL_ERRMSGLEN, @@ -896,12 +972,15 @@ jailparam_export(struct jailparam *jp) void jailparam_free(struct jailparam *jp, unsigned njp) { + unsigned j; for (j = 0; j < njp; j++) { free(jp[j].jp_name); - if (!(jp[j].jp_flags & JP_RAWVALUE)) + if (!(jp[j].jp_flags & JP_RAWVALUE)) { + jps_free(jp); free(jp[j].jp_value); + } } } @@ -1055,14 +1134,17 @@ unknown_parameter: } jp->jp_valuelen = strtoul(desc.s, NULL, 10); break; - case CTLTYPE_STRUCT: - if (!strcmp(desc.s, "S,in_addr")) { - jp->jp_structtype = JPS_IN_ADDR; - jp->jp_valuelen = sizeof(struct in_addr); - } else if (!strcmp(desc.s, "S,in6_addr")) { - jp->jp_structtype = JPS_IN6_ADDR; - jp->jp_valuelen = sizeof(struct in6_addr); + case CTLTYPE_STRUCT: { + const struct jp_structdef *jpsdef; + + jpsdef = jp_structinfo(desc.s, &jp->jp_structtype); + if (jpsdef != NULL) { + assert(jp->jp_structtype >= 0); + + jp->jp_valuelen = jpsdef->jps_valuelen; } else { + assert(jp->jp_structtype == -1); + desclen = 0; if (sysctl(mib + 2, miblen / sizeof(int), NULL, &jp->jp_valuelen, NULL, 0) < 0) { @@ -1073,6 +1155,7 @@ unknown_parameter: } } break; + } case CTLTYPE_NODE: /* * A node might be described by an empty-named child, @@ -1215,3 +1298,201 @@ kvname(const char *name) return (kvname); } + +static int +jps_get(struct jailparam *jp, struct iovec *jiov) +{ + const struct jp_structdef *jpsdef; + + jpsdef = JPSDEF_OF(jp); + if (jpsdef == NULL || jpsdef->jps_get == NULL) + return (0); /* Nop, but not an error. 
*/ + + return ((jpsdef->jps_get)(jp, jiov)); +} + +static int +jps_set(const struct jailparam *jp, struct iovec *jiov) +{ + const struct jp_structdef *jpsdef; + + jpsdef = JPSDEF_OF(jp); + if (jpsdef == NULL || jpsdef->jps_set == NULL) + return (EINVAL); /* Unhandled */ + + return ((jpsdef->jps_set)(jp, jiov)); +} + +static void +jps_free(struct jailparam *jp) +{ + const struct jp_structdef *jpsdef; + + jpsdef = JPSDEF_OF(jp); + if (jpsdef == NULL) + return; + + if (jpsdef->jps_free != NULL) + jpsdef->jps_free(jp); +} + +static int +jps_import_in_addr(const struct jailparam *jp, int i, const char *value) +{ + struct in_addr *addr; + + addr = &((struct in_addr *)jp->jp_value)[i]; + if (inet_pton(AF_INET, value, addr) != 1) { + snprintf(jail_errmsg, JAIL_ERRMSGLEN, + "%s: not an IPv4 address: %s", jp->jp_name, value); + errno = EINVAL; + return (-1); + } + + return (0); +} + +static int +jps_import_in6_addr(const struct jailparam *jp, int i, const char *value) +{ + struct in6_addr *addr6; + + addr6 = &((struct in6_addr *)jp->jp_value)[i]; + if (inet_pton(AF_INET6, value, addr6) != 1) { + snprintf(jail_errmsg, JAIL_ERRMSGLEN, + "%s: not an IPv6 address: %s", jp->jp_name, value); + errno = EINVAL; + return (-1); + } + + return (0); +} + +static int +jps_import_mac_label(const struct jailparam *jp, int i, const char *value) +{ + mac_t *pmac; + + pmac = &((mac_t *)jp->jp_value)[i]; + if (mac_from_text(pmac, value) != 0) { + int serrno = errno; + + snprintf(jail_errmsg, JAIL_ERRMSGLEN, "%s: mac_from_text: %s", + jp->jp_name, strerror(errno)); + errno = serrno; + return (-1); + } + + return (0); +} + +static char * +jps_export_in_addr(const struct jailparam *jp, int i) +{ + struct in_addr *addr; + char valbuf[INET_ADDRSTRLEN]; + + addr = &((struct in_addr *)jp->jp_value)[i]; + if (inet_ntop(AF_INET, addr, valbuf, sizeof(valbuf)) == NULL) + return (NULL); + + /* Error checked by caller. */ + return (strdup(valbuf)); +} + +static char * +jps_export_in6_addr(const struct jailparam *jp, int i) +{ + struct in6_addr *addr6; + char valbuf[INET6_ADDRSTRLEN]; + + addr6 = &((struct in6_addr *)jp->jp_value)[i]; + if (inet_ntop(AF_INET6, addr6, valbuf, sizeof(valbuf)) == NULL) + return (NULL); + + /* Error checked by caller. */ + return (strdup(valbuf)); +} + +static char * +jps_export_mac_label(const struct jailparam *jp, int i) +{ + mac_t *macp; + char *labelbuf; + int error; + + macp = &((mac_t *)jp->jp_value)[i]; + error = mac_to_text(*macp, &labelbuf); + if (error != 0) + return (NULL); + + return (labelbuf); +} + +static int +jps_get_mac_label(struct jailparam *jp, struct iovec *jiov) +{ + mac_t *pmac = jp->jp_value; + int error; + + error = mac_prepare_type(pmac, "jail"); + if (error != 0 && errno == ENOENT) { + /* + * We special-case the scenario where a system has a custom + * mac.conf(5) that doesn't include a jail entry -- just let + * an empty label slide. + */ + error = mac_prepare(pmac, "?"); + } + if (error != 0) { + int serrno = errno; + + free(jp->jp_value); + jp->jp_value = NULL; + + strerror_r(serrno, jail_errmsg, JAIL_ERRMSGLEN); + errno = serrno; + return (-1); + } + + /* + * MAC label gets special handling because libjail internally maintains + * it as a pointer to a mac_t, but we actually want to pass the mac_t + * itself. We don't want the jailparam_get() zeroing behavior, as it's + * initialized by us. 
+ */ + jiov->iov_base = *pmac; + jiov->iov_len = sizeof(**pmac); + return (1); +} + +static int +jps_set_mac_label(const struct jailparam *jp, struct iovec *jiov) +{ + mac_t *pmac; + + /* + * MAC label gets special handling because libjail internally + * maintains it as a pointer to a mac_t, but we actually want to + * pass the mac_t itself. + */ + pmac = jp->jp_value; + if (pmac != NULL) { + jiov->iov_base = *pmac; + jiov->iov_len = sizeof(**pmac); + } else { + jiov->iov_base = NULL; + jiov->iov_len = 0; + } + + return (0); +} + +static void +jps_free_mac_label(struct jailparam *jp) +{ + mac_t *pmac = jp->jp_value; + + if (pmac != NULL) + mac_free(*pmac); +} diff --git a/lib/libmd/aarch64/md5block.S b/lib/libmd/aarch64/md5block.S index b928c8dd795a..297db7f6bdbd 100644 --- a/lib/libmd/aarch64/md5block.S +++ b/lib/libmd/aarch64/md5block.S @@ -134,7 +134,7 @@ d_ .req m5 stp x23, x24, [sp, #0x20] stp x25, x26, [sp, #0x30] - bics len, len, #63 // length in blocks + ands len, len, #~63 // length in blocks add end, buf, len // end pointer beq .Lend // was len == 0 after BICS? diff --git a/lib/libnetbsd/sys/cdefs.h b/lib/libnetbsd/sys/cdefs.h index 8da812693961..fcc3cc6f15be 100644 --- a/lib/libnetbsd/sys/cdefs.h +++ b/lib/libnetbsd/sys/cdefs.h @@ -1,4 +1,3 @@ - /*- * SPDX-License-Identifier: BSD-3-Clause * @@ -81,4 +80,18 @@ #define __BITS(__m, __n) \ ((__BIT(MAX((__m), (__n)) + 1) - 1) ^ (__BIT(MIN((__m), (__n))) - 1)) +/* + * To be used when an empty body is required like: + * + * #ifdef DEBUG + * # define dprintf(a) printf(a) + * #else + * # define dprintf(a) __nothing + * #endif + * + * We use ((void)0) instead of do {} while (0) so that it + * works on , expressions. + */ +#define __nothing (/*LINTED*/(void)0) + #endif /* _LIBNETBSD_SYS_CDEFS_H_ */ diff --git a/lib/libnv/tests/nvlist_send_recv_test.c b/lib/libnv/tests/nvlist_send_recv_test.c index 79297dfe2043..4a5c10df656d 100644 --- a/lib/libnv/tests/nvlist_send_recv_test.c +++ b/lib/libnv/tests/nvlist_send_recv_test.c @@ -482,8 +482,6 @@ ATF_TC_BODY(nvlist_send_recv__send_many_fds__dgram, tc) size_t len; int error; - atf_tc_skip("https://bugs.freebsd.org/260891"); - /* size of the largest datagram to send */ temp_maxdgram = 16772; len = sizeof(maxdgram); diff --git a/lib/libpam/modules/pam_krb5/Makefile b/lib/libpam/modules/pam_krb5/Makefile index d55f2d0b5971..04caf7c1db44 100644 --- a/lib/libpam/modules/pam_krb5/Makefile +++ b/lib/libpam/modules/pam_krb5/Makefile @@ -63,6 +63,7 @@ MANSRC.pam_krb5.8=pam-krb5.8 CFLAGS= -I${SRCDIR} \ -I${.CURDIR} \ + -I${OBJTOP}/krb5/util/profile \ -fno-strict-aliasing \ -DHAVE_CONFIG_H diff --git a/lib/libpam/modules/pam_ksu/Makefile b/lib/libpam/modules/pam_ksu/Makefile index 8b695384c13c..90639b121466 100644 --- a/lib/libpam/modules/pam_ksu/Makefile +++ b/lib/libpam/modules/pam_ksu/Makefile @@ -39,6 +39,7 @@ LIBADD+= krb5 WARNS= 2 CFLAGS+= -I${SRCTOP}/crypto/krb5/src/include CFLAGS+= -I${SRCTOP}/krb5/include +CFLAGS+= -I${OBJTOP}/krb5/util/profile CFLAGS+= -include ${SRCTOP}/crypto/krb5/src/include/k5-int.h CFLAGS+= -DMK_MITKRB5=yes .endif diff --git a/lib/libpam/pam.d/cron b/lib/libpam/pam.d/cron index 733631306641..490663508a72 100644 --- a/lib/libpam/pam.d/cron +++ b/lib/libpam/pam.d/cron @@ -6,3 +6,6 @@ # account account required pam_nologin.so account required pam_unix.so + +# session +session required pam_permit.so diff --git a/lib/libpfctl/libpfctl.c b/lib/libpfctl/libpfctl.c index f8c92a5cd319..cd484949e4da 100644 --- a/lib/libpfctl/libpfctl.c +++ b/lib/libpfctl/libpfctl.c @@ -69,12 +69,15 @@ 
const char* PFCTL_SYNCOOKIES_MODE_NAMES[] = { static int _pfctl_clear_states(int , const struct pfctl_kill *, unsigned int *, uint64_t); +static void _pfctl_verify_parsers(void); struct pfctl_handle * pfctl_open(const char *pf_device) { struct pfctl_handle *h; + _pfctl_verify_parsers(); + h = calloc(1, sizeof(struct pfctl_handle)); h->fd = open(pf_device, O_RDWR); @@ -389,8 +392,8 @@ static const struct snl_attr_parser ap_getstatus[] = { { .type = PF_GS_FCOUNTERS, .off = _OUT(fcounters), .cb = snl_attr_get_counters }, { .type = PF_GS_SCOUNTERS, .off = _OUT(scounters), .cb = snl_attr_get_counters }, { .type = PF_GS_CHKSUM, .off = _OUT(pf_chksum), .arg_u32 = PF_MD5_DIGEST_LENGTH, .cb = snl_attr_get_bytes }, - { .type = PF_GS_BCOUNTERS, .off = _OUT(bcounters), .arg_u32 = 2 * 2, .cb = snl_attr_get_uint64_array }, { .type = PF_GS_PCOUNTERS, .off = _OUT(pcounters), .arg_u32 = 2 * 2 * 2, .cb = snl_attr_get_uint64_array }, + { .type = PF_GS_BCOUNTERS, .off = _OUT(bcounters), .arg_u32 = 2 * 2, .cb = snl_attr_get_uint64_array }, { .type = PF_GS_NCOUNTERS, .off = _OUT(ncounters), .cb = snl_attr_get_counters }, { .type = PF_GS_FRAGMENTS, .off = _OUT(fragments), .cb = snl_attr_get_uint64 }, }; @@ -1313,6 +1316,11 @@ snl_add_msg_attr_pf_rule(struct snl_writer *nw, uint32_t type, const struct pfct snl_add_msg_attr_ip6(nw, PF_RT_DIVERT_ADDRESS, &r->divert.addr.v6); snl_add_msg_attr_u16(nw, PF_RT_DIVERT_PORT, r->divert.port); + snl_add_msg_attr_u8(nw, PF_RT_STATE_LIMIT, r->statelim.id); + snl_add_msg_attr_u32(nw, PF_RT_STATE_LIMIT_ACTION, r->statelim.limiter_action); + snl_add_msg_attr_u8(nw, PF_RT_SOURCE_LIMIT, r->sourcelim.id); + snl_add_msg_attr_u32(nw, PF_RT_SOURCE_LIMIT_ACTION, r->sourcelim.limiter_action); + snl_end_attr_nested(nw, off); } @@ -1704,6 +1712,10 @@ static struct snl_attr_parser ap_getrule[] = { { .type = PF_RT_TYPE_2, .off = _OUT(r.type), .cb = snl_attr_get_uint16 }, { .type = PF_RT_CODE_2, .off = _OUT(r.code), .cb = snl_attr_get_uint16 }, { .type = PF_RT_EXPTIME, .off = _OUT(r.exptime), .cb = snl_attr_get_time_t }, + { .type = PF_RT_STATE_LIMIT, .off = _OUT(r.statelim.id), .cb = snl_attr_get_uint8 }, + { .type = PF_RT_SOURCE_LIMIT, .off = _OUT(r.sourcelim.id), .cb = snl_attr_get_uint8 }, + { .type = PF_RT_STATE_LIMIT_ACTION, .off = _OUT(r.statelim.limiter_action), .cb = snl_attr_get_uint32 }, + { .type = PF_RT_SOURCE_LIMIT_ACTION, .off = _OUT(r.sourcelim.limiter_action), .cb = snl_attr_get_uint32 }, }; #undef _OUT SNL_DECLARE_PARSER(getrule_parser, struct genlmsghdr, snl_f_p_empty, ap_getrule); @@ -1956,15 +1968,9 @@ static struct snl_attr_parser ap_state[] = { #undef _OUT SNL_DECLARE_PARSER(state_parser, struct genlmsghdr, snl_f_p_empty, ap_state); -static const struct snl_hdr_parser *all_parsers[] = { - &state_parser, &skey_parser, &speer_parser, - &creator_parser, &getrules_parser -}; - int pfctl_get_states_h(struct pfctl_handle *h, struct pfctl_state_filter *filter, pfctl_get_state_fn f, void *arg) { - SNL_VERIFY_PARSERS(all_parsers); int family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); int ret; @@ -3910,3 +3916,421 @@ pfctl_clr_astats(struct pfctl_handle *h, const struct pfr_table *tbl, return (ret); } +static int +_pfctl_test_addrs(struct pfctl_handle *h, const struct pfr_table *tbl, + struct pfr_addr *addrs, int size, int *nmatch, int flags) +{ + struct snl_writer nw; + struct snl_errmsg_data e = {}; + struct nlmsghdr *hdr; + uint32_t seq_id; + struct nl_astats attrs; + int family_id; + + family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); + if (family_id == 0) 
+ return (ENOTSUP); + + snl_init_writer(&h->ss, &nw); + hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_TABLE_TEST_ADDRS); + + snl_add_msg_attr_table(&nw, PF_TA_TABLE, tbl); + snl_add_msg_attr_u32(&nw, PF_TA_FLAGS, flags); + for (int i = 0; i < size; i++) + snl_add_msg_attr_pfr_addr(&nw, PF_TA_ADDR, &addrs[i]); + + if ((hdr = snl_finalize_msg(&nw)) == NULL) + return (ENXIO); + seq_id = hdr->nlmsg_seq; + + if (! snl_send_message(&h->ss, hdr)) + return (ENXIO); + + while ((hdr = snl_read_reply_multi(&h->ss, seq_id, &e)) != NULL) { + if (! snl_parse_nlmsg(&h->ss, hdr, &table_astats_parser, &attrs)) + continue; + } + + if (nmatch) + *nmatch = attrs.total_count; + + return (e.error); +} + +int +pfctl_test_addrs(struct pfctl_handle *h, const struct pfr_table *tbl, + struct pfr_addr *addrs, int size, int *nmatch, int flags) +{ + int ret; + int off = 0; + int partial_match; + int chunk_size; + + if (nmatch) + *nmatch = 0; + + do { + chunk_size = MIN(size - off, 256); + ret = _pfctl_test_addrs(h, tbl, &addrs[off], chunk_size, + &partial_match, flags); + if (ret != 0) + break; + if (nmatch) + *nmatch += partial_match; + off += chunk_size; + } while (off < size); + + return (ret); +} + +static void +snl_add_msg_attr_limit_rate(struct snl_writer *nw, uint32_t type, + const struct pfctl_limit_rate *rate) +{ + int off; + + off = snl_add_msg_attr_nested(nw, type); + + snl_add_msg_attr_u32(nw, PF_LR_LIMIT, rate->limit); + snl_add_msg_attr_u32(nw, PF_LR_SECONDS, rate->seconds); + + snl_end_attr_nested(nw, off); +} + +#define _OUT(_field) offsetof(struct pfctl_limit_rate, _field) +static const struct snl_attr_parser ap_limit_rate[] = { + { .type = PF_LR_LIMIT, .off = _OUT(limit), .cb = snl_attr_get_uint32 }, + { .type = PF_LR_SECONDS, .off = _OUT(seconds), .cb = snl_attr_get_uint32 }, +}; +SNL_DECLARE_ATTR_PARSER(limit_rate_parser, ap_limit_rate); +#undef _OUT + +#define _OUT(_field) offsetof(struct pfctl_state_lim, _field) +static struct snl_attr_parser ap_statelim[] = { + { .type = PF_SL_NAME, .off = _OUT(name), .arg_u32 = PF_STATELIM_NAME_LEN, .cb = snl_attr_copy_string }, + { .type = PF_SL_ID, .off = _OUT(id), .cb = snl_attr_get_uint32 }, + { .type = PF_SL_LIMIT, .off = _OUT(limit), .cb = snl_attr_get_uint32 }, + { .type = PF_SL_RATE, .off = _OUT(rate), .arg = &limit_rate_parser, .cb = snl_attr_get_nested }, + { .type = PF_SL_DESCR, .off = _OUT(description), .arg_u32 = PF_STATELIM_DESCR_LEN, .cb = snl_attr_copy_string }, + { .type = PF_SL_INUSE, .off = _OUT(inuse), .cb = snl_attr_get_uint32 }, + { .type = PF_SL_ADMITTED, .off = _OUT(admitted), .cb = snl_attr_get_uint64 }, + { .type = PF_SL_HARDLIMITED, .off = _OUT(hardlimited), .cb = snl_attr_get_uint64 }, + { .type = PF_SL_RATELIMITED, .off = _OUT(ratelimited), .cb = snl_attr_get_uint64 }, +}; +#undef _OUT +SNL_DECLARE_PARSER(statelim_parser, struct genlmsghdr, snl_f_p_empty, ap_statelim); + +int +pfctl_state_limiter_nget(struct pfctl_handle *h, struct pfctl_state_lim *lim) +{ + struct snl_writer nw; + struct snl_errmsg_data e = {}; + struct nlmsghdr *hdr; + uint32_t seq_id; + int family_id; + + family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); + if (family_id == 0) + return (ENOTSUP); + + snl_init_writer(&h->ss, &nw); + hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_STATE_LIMITER_NGET); + + snl_add_msg_attr_u32(&nw, PF_SL_ID, lim->id); + + if ((hdr = snl_finalize_msg(&nw)) == NULL) + return (ENXIO); + seq_id = hdr->nlmsg_seq; + + if (! 
snl_send_message(&h->ss, hdr)) + return (ENXIO); + + while ((hdr = snl_read_reply_multi(&h->ss, seq_id, &e)) != NULL) { + if (! snl_parse_nlmsg(&h->ss, hdr, &statelim_parser, lim)) + continue; + } + + return (e.error); +} + +int +pfctl_state_limiter_add(struct pfctl_handle *h, struct pfctl_state_lim *lim) +{ + struct snl_writer nw; + struct snl_errmsg_data e = {}; + struct nlmsghdr *hdr; + uint32_t seq_id; + int family_id; + + family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); + if (family_id == 0) + return (ENOTSUP); + + snl_init_writer(&h->ss, &nw); + hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_STATE_LIMITER_ADD); + + snl_add_msg_attr_u32(&nw, PF_SL_ID, lim->id); + snl_add_msg_attr_u32(&nw, PF_SL_TICKET, lim->ticket); + snl_add_msg_attr_string(&nw, PF_SL_NAME, lim->name); + snl_add_msg_attr_u32(&nw, PF_SL_LIMIT, lim->limit); + snl_add_msg_attr_limit_rate(&nw, PF_SL_RATE, &lim->rate); + snl_add_msg_attr_string(&nw, PF_SL_DESCR, lim->description); + + if ((hdr = snl_finalize_msg(&nw)) == NULL) + return (ENXIO); + seq_id = hdr->nlmsg_seq; + + if (! snl_send_message(&h->ss, hdr)) + return (ENXIO); + + while ((hdr = snl_read_reply_multi(&h->ss, seq_id, &e)) != NULL) { + if (! snl_parse_nlmsg(&h->ss, hdr, &statelim_parser, &lim)) + continue; + } + + return (e.error); +} + +#define _OUT(_field) offsetof(struct pfctl_source_lim, _field) +static struct snl_attr_parser ap_sourcelim[] = { + { .type = PF_SCL_NAME, .off = _OUT(name), .arg_u32 = PF_SOURCELIM_NAME_LEN, .cb = snl_attr_copy_string }, + { .type = PF_SCL_ID, .off = _OUT(id), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_ENTRIES, .off = _OUT(entries), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_LIMIT, .off = _OUT(limit), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_RATE, .off = _OUT(rate), .arg = &limit_rate_parser, .cb = snl_attr_get_nested }, + { .type = PF_SCL_OVERLOAD_TBL_NAME, .off = _OUT(overload_tblname), .arg_u32 = PF_TABLE_NAME_SIZE, .cb = snl_attr_copy_string }, + { .type = PF_SCL_OVERLOAD_HIGH_WM, .off = _OUT(overload_hwm), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_OVERLOAD_LOW_WM, .off = _OUT(overload_lwm), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_INET_PREFIX, .off = _OUT(inet_prefix), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_INET6_PREFIX, .off = _OUT(inet6_prefix), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_DESCR, .off = _OUT(description), .arg_u32 = PF_SOURCELIM_DESCR_LEN, .cb = snl_attr_copy_string }, + { .type = PF_SCL_NENTRIES, .off = _OUT(nentries), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_INUSE, .off = _OUT(inuse), .cb = snl_attr_get_uint32 }, + { .type = PF_SCL_ADDR_ALLOCS, .off = _OUT(addrallocs), .cb = snl_attr_get_uint64 }, + { .type = PF_SCL_ADDR_NOMEM, .off = _OUT(addrnomem), .cb = snl_attr_get_uint64 }, + { .type = PF_SCL_ADMITTED, .off = _OUT(admitted), .cb = snl_attr_get_uint64 }, + { .type = PF_SCL_ADDRLIMITED, .off = _OUT(addrlimited), .cb = snl_attr_get_uint64 }, + { .type = PF_SCL_HARDLIMITED, .off = _OUT(hardlimited), .cb = snl_attr_get_uint64 }, + { .type = PF_SCL_RATELIMITED, .off = _OUT(ratelimited), .cb = snl_attr_get_uint64 }, +}; +#undef _OUT +SNL_DECLARE_PARSER(sourcelim_parser, struct genlmsghdr, snl_f_p_empty, ap_sourcelim); + +int +pfctl_source_limiter_add(struct pfctl_handle *h, struct pfctl_source_lim *lim) +{ + struct snl_writer nw; + struct snl_errmsg_data e = {}; + struct nlmsghdr *hdr; + uint32_t seq_id; + int family_id; + + family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); + if (family_id == 0) + return (ENOTSUP); + + 
snl_init_writer(&h->ss, &nw); + hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_SOURCE_LIMITER_ADD); + + snl_add_msg_attr_u32(&nw, PF_SCL_TICKET, lim->ticket); + snl_add_msg_attr_string(&nw, PF_SCL_NAME, lim->name); + snl_add_msg_attr_u32(&nw, PF_SCL_ID, lim->id); + snl_add_msg_attr_u32(&nw, PF_SCL_ENTRIES, lim->entries); + snl_add_msg_attr_u32(&nw, PF_SCL_LIMIT, lim->limit); + snl_add_msg_attr_limit_rate(&nw, PF_SCL_RATE, &lim->rate); + snl_add_msg_attr_string(&nw, PF_SCL_OVERLOAD_TBL_NAME, lim->overload_tblname); + snl_add_msg_attr_u32(&nw, PF_SCL_OVERLOAD_HIGH_WM, lim->overload_hwm); + snl_add_msg_attr_u32(&nw, PF_SCL_OVERLOAD_LOW_WM, lim->overload_lwm); + snl_add_msg_attr_u32(&nw, PF_SCL_INET_PREFIX, lim->inet_prefix); + snl_add_msg_attr_u32(&nw, PF_SCL_INET6_PREFIX, lim->inet6_prefix); + snl_add_msg_attr_string(&nw, PF_SCL_DESCR, lim->description); + + if ((hdr = snl_finalize_msg(&nw)) == NULL) + return (ENXIO); + seq_id = hdr->nlmsg_seq; + + if (! snl_send_message(&h->ss, hdr)) + return (ENXIO); + + while ((hdr = snl_read_reply_multi(&h->ss, seq_id, &e)) != NULL) { + if (! snl_parse_nlmsg(&h->ss, hdr, &sourcelim_parser, &lim)) + continue; + } + + return (e.error); +} + +static int +_pfctl_source_limiter_get(struct pfctl_handle *h, int cmd, struct pfctl_source_lim *lim) +{ + struct snl_writer nw; + struct snl_errmsg_data e = {}; + struct nlmsghdr *hdr; + uint32_t seq_id; + int family_id; + + family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); + if (family_id == 0) + return (ENOTSUP); + + snl_init_writer(&h->ss, &nw); + hdr = snl_create_genl_msg_request(&nw, family_id, cmd); + + snl_add_msg_attr_u32(&nw, PF_SCL_ID, lim->id); + + if ((hdr = snl_finalize_msg(&nw)) == NULL) + return (ENXIO); + seq_id = hdr->nlmsg_seq; + + if (! snl_send_message(&h->ss, hdr)) + return (ENXIO); + + while ((hdr = snl_read_reply_multi(&h->ss, seq_id, &e)) != NULL) { + if (! snl_parse_nlmsg(&h->ss, hdr, &sourcelim_parser, lim)) + continue; + } + + return (e.error); +} + +int +pfctl_source_limiter_get(struct pfctl_handle *h, struct pfctl_source_lim *lim) +{ + return (_pfctl_source_limiter_get(h, PFNL_CMD_SOURCE_LIMITER_GET, lim)); +} + +int +pfctl_source_limiter_nget(struct pfctl_handle *h, struct pfctl_source_lim *lim) +{ + return (_pfctl_source_limiter_get(h, PFNL_CMD_SOURCE_LIMITER_NGET, lim)); +} + +#define _OUT(_field) offsetof(struct pfctl_source, _field) +static struct snl_attr_parser ap_source[] = { + { .type = PF_SRC_AF, .off = _OUT(af), .cb = snl_attr_get_uint8 }, + { .type = PF_SRC_RDOMAIN, .off = _OUT(rdomain), .cb = snl_attr_get_uint32 }, + { .type = PF_SRC_ADDR, .off = _OUT(addr), .cb = snl_attr_get_in6_addr }, + { .type = PF_SRC_INUSE, .off = _OUT(inuse), .cb = snl_attr_get_uint32 }, + { .type = PF_SRC_ADMITTED, .off = _OUT(admitted), .cb = snl_attr_get_uint64 }, + { .type = PF_SRC_HARDLIMITED, .off = _OUT(hardlimited), .cb = snl_attr_get_uint64 }, + { .type = PF_SRC_RATELIMITED, .off = _OUT(ratelimited), .cb = snl_attr_get_uint64 }, + { .type = PF_SRC_LIMIT, .off = _OUT(limit), .cb = snl_attr_get_uint32 }, + { .type = PF_SRC_INET_PREFIX, .off = _OUT(inet_prefix), .cb = snl_attr_get_uint32 }, + {. 
type = PF_SRC_INET6_PREFIX, .off = _OUT(inet6_prefix), .cb = snl_attr_get_uint32 }, +}; +#undef _OUT +SNL_DECLARE_PARSER(source_parser, struct genlmsghdr, snl_f_p_empty, ap_source); + +int +pfctl_source_get(struct pfctl_handle *h, int id, pfctl_get_source_fn fn, void *arg) +{ + struct snl_writer nw; + struct snl_errmsg_data e = {}; + struct nlmsghdr *hdr; + uint32_t seq_id; + int family_id, error; + + family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); + if (family_id == 0) + return (ENOTSUP); + + snl_init_writer(&h->ss, &nw); + hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_SOURCE_NGET); + + snl_add_msg_attr_u32(&nw, PF_SRC_ID, id); + + if ((hdr = snl_finalize_msg(&nw)) == NULL) + return (ENXIO); + seq_id = hdr->nlmsg_seq; + + if (! snl_send_message(&h->ss, hdr)) + return (ENXIO); + + while ((hdr = snl_read_reply_multi(&h->ss, seq_id, &e)) != NULL) { + struct pfctl_source src; + + if (! snl_parse_nlmsg(&h->ss, hdr, &source_parser, &src)) + continue; + + error = fn(&src, arg); + if (error != 0) { + e.error = error; + break; + } + } + + return (e.error); +} + +int +pfctl_source_clear(struct pfctl_handle *h, struct pfctl_source_clear *kill) +{ + struct snl_writer nw; + struct snl_errmsg_data e = {}; + struct nlmsghdr *hdr; + uint32_t seq_id; + int family_id; + + family_id = snl_get_genl_family(&h->ss, PFNL_FAMILY_NAME); + if (family_id == 0) + return (ENOTSUP); + + snl_init_writer(&h->ss, &nw); + hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_SOURCE_CLEAR); + + snl_add_msg_attr_string(&nw, PF_SC_NAME, kill->name); + snl_add_msg_attr_u32(&nw, PF_SC_ID, kill->id); + snl_add_msg_attr_u32(&nw, PF_SC_RDOMAIN, kill->rdomain); + snl_add_msg_attr_u8(&nw, PF_SC_AF, kill->af); + snl_add_msg_attr_ip6(&nw, PF_SC_ADDR, &kill->addr.v6); + + if ((hdr = snl_finalize_msg(&nw)) == NULL) + return (ENXIO); + seq_id = hdr->nlmsg_seq; + + if (! 
snl_send_message(&h->ss, hdr)) + return (ENXIO); + + while ((hdr = snl_read_reply_multi(&h->ss, seq_id, &e)) != NULL) { + } + + return (e.error); +} + +static const struct snl_hdr_parser *all_parsers[] = { + &begin_addrs_parser, + &clear_states_parser, + &clr_addrs_parser, + &creator_parser, + &get_addr_parser, + &get_addrs_parser, + &get_limit_parser, + &get_timeout_parser, + &getrule_parser, + &getrules_parser, + &getstatus_parser, + &nadd_parser, + &natlook_parser, + &ndel_parser, + &ruleset_parser, + &skey_parser, + &source_parser, + &sourcelim_parser, + &speer_parser, + &srcnode_parser, + &state_parser, + &statelim_parser, + &table_add_addr_parser, + &table_astats_parser, + &table_del_addr_parser, + &table_get_addr_parser, + &table_set_addr_parser, + &tstats_clr_parser, + &tstats_parser, +}; + +static void +_pfctl_verify_parsers(void) +{ + SNL_VERIFY_PARSERS(all_parsers); +} diff --git a/lib/libpfctl/libpfctl.h b/lib/libpfctl/libpfctl.h index b885497ab0e8..1012be53db65 100644 --- a/lib/libpfctl/libpfctl.h +++ b/lib/libpfctl/libpfctl.h @@ -249,6 +249,14 @@ struct pfctl_rule { struct pf_rule_gid gid; char rcv_ifname[IFNAMSIZ]; bool rcvifnot; + struct { + uint8_t id; + int limiter_action; + } statelim; + struct { + uint8_t id; + int limiter_action; + } sourcelim; uint32_t rule_flag; uint8_t action; @@ -589,5 +597,106 @@ int pfctl_get_astats(struct pfctl_handle *h, const struct pfr_table *tbl, struct pfr_astats *addr, int *size, int flags); int pfctl_clr_astats(struct pfctl_handle *h, const struct pfr_table *tbl, struct pfr_addr *addr, int size, int *nzero, int flags); +int pfctl_test_addrs(struct pfctl_handle *h, const struct pfr_table *tbl, + struct pfr_addr *addr, int size, int *nmatch, int flags); + +struct pfctl_limit_rate { + unsigned int limit; + unsigned int seconds; +}; + +struct pfctl_state_lim { + uint32_t ticket; + char name[PF_STATELIM_NAME_LEN]; + uint32_t id; + unsigned int limit; + + struct pfctl_limit_rate rate; + + char description[PF_STATELIM_DESCR_LEN]; + + unsigned int inuse; + uint64_t admitted; + uint64_t hardlimited; + uint64_t ratelimited; +}; + +int pfctl_state_limiter_nget(struct pfctl_handle *h, struct pfctl_state_lim *lim); +int pfctl_state_limiter_add(struct pfctl_handle *h, struct pfctl_state_lim *lim); + +struct pfctl_source_lim { + uint32_t ticket; + + char name[PF_SOURCELIM_NAME_LEN]; + uint32_t id; + + /* limit on the total number of address entries */ + unsigned int entries; + + /* limit on the number of states per address entry */ + unsigned int limit; + + /* rate limit on the creation of states by an address entry */ + struct pfctl_limit_rate rate; + + /* + * when the number of states on an entry exceeds hwm, add + * the address to the specified table. when the number of + * states goes below lwm, remove it from the table. + */ + char overload_tblname[PF_TABLE_NAME_SIZE]; + unsigned int overload_hwm; + unsigned int overload_lwm; + + /* + * mask addresses before they're used for entries. /64s + * everywhere for inet6 makes it easy to use too much memory. 
+ */ + unsigned int inet_prefix; + unsigned int inet6_prefix; + + char description[PF_SOURCELIM_DESCR_LEN]; + + unsigned int nentries; + unsigned int inuse; + + uint64_t addrallocs; + uint64_t addrnomem; + uint64_t admitted; + uint64_t addrlimited; + uint64_t hardlimited; + uint64_t ratelimited; +}; + +int pfctl_source_limiter_get(struct pfctl_handle *h, struct pfctl_source_lim *lim); +int pfctl_source_limiter_nget(struct pfctl_handle *h, struct pfctl_source_lim *lim); +int pfctl_source_limiter_add(struct pfctl_handle *h, struct pfctl_source_lim *lim); + +struct pfctl_source { + sa_family_t af; + unsigned int rdomain; + struct pf_addr addr; + + unsigned int inet_prefix; + unsigned int inet6_prefix; + + unsigned int limit; + unsigned int inuse; + uint64_t admitted; + uint64_t hardlimited; + uint64_t ratelimited; +}; +typedef int (*pfctl_get_source_fn)(struct pfctl_source *, void *); +int pfctl_source_get(struct pfctl_handle *h, int id, + pfctl_get_source_fn fn, void *arg); + +struct pfctl_source_clear { + char name[PF_SOURCELIM_NAME_LEN]; + uint32_t id; + sa_family_t af; + unsigned int rdomain; + struct pf_addr addr; +}; +int pfctl_source_clear(struct pfctl_handle *h, struct pfctl_source_clear *); #endif diff --git a/lib/libpmc/libpmc_pmu_util.c b/lib/libpmc/libpmc_pmu_util.c index 74a93ae963d7..de642aa71a18 100644 --- a/lib/libpmc/libpmc_pmu_util.c +++ b/lib/libpmc/libpmc_pmu_util.c @@ -498,18 +498,7 @@ pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm, pm->pm_class = PMC_CLASS_K8; pe = pmu_event_get(NULL, event_name, &idx); - if (strcmp("l3cache", pe->topic) == 0){ - amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event); - amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE; - amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice); - amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread); - } - else if (strcmp("data fabric", pe->topic) == 0){ - - amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event); - amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC; - } - else{ + if (pe->pmu == NULL) { amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event); amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE; if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 || @@ -526,7 +515,19 @@ pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm, amd->pm_amd_config |= AMD_PMC_INVERT; if (pm->pm_caps & PMC_CAP_INTERRUPT) amd->pm_amd_config |= AMD_PMC_INT; + } else if (strcmp("amd_l3", pe->pmu) == 0) { + amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event); + amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE; + amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice); + amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread); + } else if (strcmp("amd_df", pe->pmu) == 0) { + amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event); + amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC; + } else { + printf("PMC pmu '%s' is not supported!\n", pe->pmu); + return (EOPNOTSUPP); } + return (0); } diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/branch-prediction.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/branch-prediction.json new file mode 100644 index 000000000000..2d8d18cb85c1 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/branch-prediction.json @@ -0,0 +1,93 @@ +[ + { + "EventName": "bp_l1_tlb_miss_l2_tlb_hit", + "EventCode": "0x84", + "BriefDescription": "Instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB." 
+ }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 2M pages.", + "UMask": "0x02" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 1G pages.", + "UMask": "0x04" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x08" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.all", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for all page sizes.", + "UMask": "0x0f" + }, + { + "EventName": "bp_l2_btb_correct", + "EventCode": "0x8b", + "BriefDescription": "L2 branch prediction overrides existing prediction (speculative)." + }, + { + "EventName": "bp_dyn_ind_pred", + "EventCode": "0x8e", + "BriefDescription": "Dynamic indirect predictions (branch used the indirect predictor to make a prediction)." + }, + { + "EventName": "bp_de_redirect", + "EventCode": "0x91", + "BriefDescription": "Number of times an early redirect is sent to branch predictor. This happens when either the decoder or dispatch logic is able to detect that the branch predictor needs to be redirected." + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if4k", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 4k or coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if2m", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 2M pages.", + "UMask": "0x02" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if1g", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 1G pages.", + "UMask": "0x04" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.all", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for all page sizes.", + "UMask": "0x07" + }, + { + "EventName": "bp_redirects.resync", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the branch predictor caused by resyncs.", + "UMask": "0x01" + }, + { + "EventName": "bp_redirects.ex_redir", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the branch predictor caused by mispredicts.", + "UMask": "0x02" + }, + { + "EventName": "bp_redirects.all", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the branch predictor." 
+ } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/data-fabric.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/data-fabric.json new file mode 100644 index 000000000000..fa06569d881d --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/data-fabric.json @@ -0,0 +1,1634 @@ +[ + { + "EventName": "local_or_remote_socket_read_data_beats_dram_0", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 0.", + "EventCode": "0x1f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_1", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 1.", + "EventCode": "0x5f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_2", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 2.", + "EventCode": "0x9f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_3", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 3.", + "EventCode": "0xdf", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_4", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 4.", + "EventCode": "0x11f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_5", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 5.", + "EventCode": "0x15f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_6", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 6.", + "EventCode": "0x19f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_7", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 7.", + "EventCode": "0x1df", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_8", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 8.", + "EventCode": "0x21f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_9", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 9.", + "EventCode": "0x25f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_10", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 10.", + "EventCode": "0x29f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_read_data_beats_dram_11", + "PublicDescription": "Read data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 11.", + "EventCode": "0x2df", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": 
"DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_0", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 0.", + "EventCode": "0x1f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_1", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 1.", + "EventCode": "0x5f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_2", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 2.", + "EventCode": "0x9f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_3", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 3.", + "EventCode": "0xdf", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_4", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 4.", + "EventCode": "0x11f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_5", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 5.", + "EventCode": "0x15f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_6", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 6.", + "EventCode": "0x19f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_7", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 7.", + "EventCode": "0x1df", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_8", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 8.", + "EventCode": "0x21f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_9", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 9.", + "EventCode": "0x25f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_10", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 10.", + "EventCode": "0x29f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_write_data_beats_dram_11", + "PublicDescription": "Write data beats (64 bytes) for transactions between local socket and DRAM Channel 11.", + "EventCode": "0x2df", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_0", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 0.", + "EventCode": "0x1f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_1", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 1.", + "EventCode": "0x5f", + "UMask": "0xbff", + 
"PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_2", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 2.", + "EventCode": "0x9f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_3", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 3.", + "EventCode": "0xdf", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_4", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 4.", + "EventCode": "0x11f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_5", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 5.", + "EventCode": "0x15f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_6", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 6.", + "EventCode": "0x19f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_7", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 7.", + "EventCode": "0x1df", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_8", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 8.", + "EventCode": "0x21f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_9", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 9.", + "EventCode": "0x25f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_10", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 10.", + "EventCode": "0x29f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_write_data_beats_dram_11", + "PublicDescription": "Write data beats (64 bytes) for transactions between remote socket and DRAM Channel 11.", + "EventCode": "0x2df", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_0", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 0.", + "EventCode": "0x1f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_1", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 1.", + "EventCode": "0x5f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_2", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 2.", + "EventCode": "0x9f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_3", + "PublicDescription": "Write data beats (64 bytes) 
for transactions between local or remote socket and DRAM Channel 3.", + "EventCode": "0xdf", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_4", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 4.", + "EventCode": "0x11f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_5", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 5.", + "EventCode": "0x15f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_6", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 6.", + "EventCode": "0x19f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_7", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 7.", + "EventCode": "0x1df", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_8", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 8.", + "EventCode": "0x21f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_9", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 9.", + "EventCode": "0x25f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_10", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 10.", + "EventCode": "0x29f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_write_data_beats_dram_11", + "PublicDescription": "Write data beats (64 bytes) for transactions between local or remote socket and DRAM Channel 11.", + "EventCode": "0x2df", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_0", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 0.", + "EventCode": "0x81f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_1", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 1.", + "EventCode": "0x85f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_2", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 2.", + "EventCode": "0x89f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_3", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 3.", + "EventCode": "0x8df", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_4", + "PublicDescription": 
"Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 4.", + "EventCode": "0x91f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_5", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 5.", + "EventCode": "0x95f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_6", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 6.", + "EventCode": "0x99f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_read_data_beats_io_7", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local socket and IO Root Complex 7.", + "EventCode": "0x9df", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_0", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 0.", + "EventCode": "0x81f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_1", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 1.", + "EventCode": "0x85f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_2", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 2.", + "EventCode": "0x89f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_3", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 3.", + "EventCode": "0x8df", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_4", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 4.", + "EventCode": "0x91f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_5", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 5.", + "EventCode": "0x95f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_6", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 6.", + "EventCode": "0x99f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_upstream_write_data_beats_io_7", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local socket and IO Root Complex 7.", + "EventCode": "0x9df", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_read_data_beats_io_0", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 0.", + "EventCode": "0x81f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": 
"remote_socket_upstream_read_data_beats_io_1", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 1.", + "EventCode": "0x85f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_read_data_beats_io_2", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 2.", + "EventCode": "0x89f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_read_data_beats_io_3", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 3.", + "EventCode": "0x8df", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_read_data_beats_io_4", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 4.", + "EventCode": "0x91f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_read_data_beats_io_5", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 5.", + "EventCode": "0x95f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_read_data_beats_io_6", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 6.", + "EventCode": "0x99f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_read_data_beats_io_7", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between remote socket and IO Root Complex 7.", + "EventCode": "0x9df", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_0", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 0.", + "EventCode": "0x81f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_1", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 1.", + "EventCode": "0x85f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_2", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 2.", + "EventCode": "0x89f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_3", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 3.", + "EventCode": "0x8df", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_4", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 4.", + "EventCode": "0x91f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_5", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 5.", + "EventCode": "0x95f", + 
"UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_6", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 6.", + "EventCode": "0x99f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_upstream_write_data_beats_io_7", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between remote socket and IO Root Complex 7.", + "EventCode": "0x9df", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_0", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 0.", + "EventCode": "0x81f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_1", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 1.", + "EventCode": "0x85f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_2", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 2.", + "EventCode": "0x89f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_3", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 3.", + "EventCode": "0x8df", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_4", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 4.", + "EventCode": "0x91f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_5", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 5.", + "EventCode": "0x95f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_6", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 6.", + "EventCode": "0x99f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_read_data_beats_io_7", + "PublicDescription": "Upstream DMA read data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 7.", + "EventCode": "0x9df", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_0", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 0.", + "EventCode": "0x81f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_1", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 1.", + "EventCode": "0x85f", + "UMask": "0xfff", + "PerPkg": 
"1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_2", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 2.", + "EventCode": "0x89f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_3", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 3.", + "EventCode": "0x8df", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_4", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 4.", + "EventCode": "0x91f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_5", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 5.", + "EventCode": "0x95f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_6", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 6.", + "EventCode": "0x99f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_upstream_write_data_beats_io_7", + "PublicDescription": "Upstream DMA write data beats (64 bytes) for transactions between local or remote socket and IO Root Complex 7.", + "EventCode": "0x9df", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_0", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 0.", + "EventCode": "0x41e", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_1", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 1.", + "EventCode": "0x45e", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_2", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 2.", + "EventCode": "0x49e", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_3", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 3.", + "EventCode": "0x4de", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_4", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 4.", + "EventCode": "0x51e", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_5", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 5.", + "EventCode": "0x55e", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_6", + "PublicDescription": "Inbound data beats (32 bytes) for 
transactions between local socket and Core-to-Fabric Interface 6.", + "EventCode": "0x59e", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_7", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 7.", + "EventCode": "0x5de", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_8", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 8.", + "EventCode": "0x41f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_9", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 9.", + "EventCode": "0x45f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_10", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 10.", + "EventCode": "0x49f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_11", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 11.", + "EventCode": "0x4df", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_12", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 12.", + "EventCode": "0x51f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_13", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 13.", + "EventCode": "0x55f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_14", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 14.", + "EventCode": "0x59f", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_cfi_15", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local socket and Core-to-Fabric Interface 15.", + "EventCode": "0x5df", + "UMask": "0x7fe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_0", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 0.", + "EventCode": "0x41e", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_1", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 1.", + "EventCode": "0x45e", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_2", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 2.", + "EventCode": "0x49e", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_3", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between 
local socket and Core-to-Fabric Interface 3.", + "EventCode": "0x4de", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_4", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 4.", + "EventCode": "0x51e", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_5", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 5.", + "EventCode": "0x55e", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_6", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 6.", + "EventCode": "0x59e", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_7", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 7.", + "EventCode": "0x5de", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_8", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 8.", + "EventCode": "0x41f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_9", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 9.", + "EventCode": "0x45f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_10", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 10.", + "EventCode": "0x49f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_11", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 11.", + "EventCode": "0x4df", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_12", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 12.", + "EventCode": "0x51f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_13", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 13.", + "EventCode": "0x55f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_14", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 14.", + "EventCode": "0x59f", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_cfi_15", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and Core-to-Fabric Interface 15.", + "EventCode": "0x5df", + "UMask": "0x7ff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_0", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote 
socket and Core-to-Fabric Interface 0.", + "EventCode": "0x41e", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_1", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 1.", + "EventCode": "0x45e", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_2", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 2.", + "EventCode": "0x49e", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_3", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 3.", + "EventCode": "0x4de", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_4", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 4.", + "EventCode": "0x51e", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_5", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 5.", + "EventCode": "0x55e", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_6", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 6.", + "EventCode": "0x59e", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_7", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 7.", + "EventCode": "0x5de", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_8", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 8.", + "EventCode": "0x41f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_9", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 9.", + "EventCode": "0x45f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_10", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 10.", + "EventCode": "0x49f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_11", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 11.", + "EventCode": "0x4df", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_12", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 12.", + "EventCode": "0x51f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_13", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and 
Core-to-Fabric Interface 13.", + "EventCode": "0x55f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_14", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 14.", + "EventCode": "0x59f", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_inbound_data_beats_cfi_15", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between remote socket and Core-to-Fabric Interface 15.", + "EventCode": "0x5df", + "UMask": "0xbfe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_0", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 0.", + "EventCode": "0x41e", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_1", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 1.", + "EventCode": "0x45e", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_2", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 2.", + "EventCode": "0x49e", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_3", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 3.", + "EventCode": "0x4de", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_4", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 4.", + "EventCode": "0x51e", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_5", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 5.", + "EventCode": "0x55e", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_6", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 6.", + "EventCode": "0x59e", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_7", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 7.", + "EventCode": "0x5de", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_8", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 8.", + "EventCode": "0x41f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_9", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 9.", + "EventCode": "0x45f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_10", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote 
socket and Core-to-Fabric Interface 10.", + "EventCode": "0x49f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_11", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 11.", + "EventCode": "0x4df", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_12", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 12.", + "EventCode": "0x51f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_13", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 13.", + "EventCode": "0x55f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_14", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 14.", + "EventCode": "0x59f", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "remote_socket_outbound_data_beats_cfi_15", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between remote socket and Core-to-Fabric Interface 15.", + "EventCode": "0x5df", + "UMask": "0xbff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_0", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 0.", + "EventCode": "0x41e", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_1", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 1.", + "EventCode": "0x45e", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_2", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 2.", + "EventCode": "0x49e", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_3", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 3.", + "EventCode": "0x4de", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_4", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 4.", + "EventCode": "0x51e", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_5", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 5.", + "EventCode": "0x55e", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_6", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 6.", + "EventCode": "0x59e", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": 
"local_or_remote_socket_inbound_data_beats_cfi_7", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 7.", + "EventCode": "0x5de", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_8", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 8.", + "EventCode": "0x41f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_9", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 9.", + "EventCode": "0x45f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_10", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 10.", + "EventCode": "0x49f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_11", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 11.", + "EventCode": "0x4df", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_12", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 12.", + "EventCode": "0x51f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_13", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 13.", + "EventCode": "0x55f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_14", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 14.", + "EventCode": "0x59f", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_inbound_data_beats_cfi_15", + "PublicDescription": "Inbound data beats (32 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 15.", + "EventCode": "0x5df", + "UMask": "0xffe", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_0", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 0.", + "EventCode": "0x41e", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_1", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 1.", + "EventCode": "0x45e", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_2", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 2.", + "EventCode": "0x49e", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_3", + 
"PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 3.", + "EventCode": "0x4de", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_4", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 4.", + "EventCode": "0x51e", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_5", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 5.", + "EventCode": "0x55e", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_6", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 6.", + "EventCode": "0x59e", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_7", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 7.", + "EventCode": "0x5de", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_8", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 8.", + "EventCode": "0x41f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_9", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 9.", + "EventCode": "0x45f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_10", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 10.", + "EventCode": "0x49f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_11", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 11.", + "EventCode": "0x4df", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_12", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 12.", + "EventCode": "0x51f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_13", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 13.", + "EventCode": "0x55f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_14", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 14.", + "EventCode": "0x59f", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_or_remote_socket_outbound_data_beats_cfi_15", + "PublicDescription": "Outbound data 
beats (64 bytes) for transactions between local or remote socket and Core-to-Fabric Interface 15.", + "EventCode": "0x5df", + "UMask": "0xfff", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_link_0", + "PublicDescription": "Inbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 0.", + "EventCode": "0xd5f", + "UMask": "0xf3f", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_link_1", + "PublicDescription": "Inbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 1.", + "EventCode": "0xd9f", + "UMask": "0xf3f", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_link_2", + "PublicDescription": "Inbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 2.", + "EventCode": "0xddf", + "UMask": "0xf3f", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_link_3", + "PublicDescription": "Inbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 3.", + "EventCode": "0xe1f", + "UMask": "0xf3f", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_link_4", + "PublicDescription": "Inbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 4.", + "EventCode": "0xe5f", + "UMask": "0xf3f", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_inbound_data_beats_link_5", + "PublicDescription": "Inbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 5.", + "EventCode": "0xe9f", + "UMask": "0xf3f", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_link_0", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 0.", + "EventCode": "0xd5f", + "UMask": "0xf3e", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_link_1", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 1.", + "EventCode": "0xd9f", + "UMask": "0xf3e", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_link_2", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 2.", + "EventCode": "0xddf", + "UMask": "0xf3e", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_link_3", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 3.", + "EventCode": "0xe1f", + "UMask": "0xf3e", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_link_4", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 4.", + "EventCode": "0xe5f", + "UMask": "0xf3e", + "PerPkg": "1", + "Unit": "DFPMC" + }, + { + "EventName": "local_socket_outbound_data_beats_link_5", + "PublicDescription": "Outbound data beats (64 bytes) for transactions between local socket and remote socket over Cross-socket Link 5.", + "EventCode": "0xe9f", + "UMask": "0xf3e", + 
"PerPkg": "1", + "Unit": "DFPMC" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/decode.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/decode.json new file mode 100644 index 000000000000..d0eff7f2a3ea --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/decode.json @@ -0,0 +1,115 @@ +[ + { + "EventName": "de_op_queue_empty", + "EventCode": "0xa9", + "BriefDescription": "Cycles where the op queue is empty. Such cycles indicate that the front-end is not delivering instructions fast enough." + }, + { + "EventName": "de_src_op_disp.x86_decoder", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from x86 decoder.", + "UMask": "0x01" + }, + { + "EventName": "de_src_op_disp.op_cache", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from op cache.", + "UMask": "0x02" + }, + { + "EventName": "de_src_op_disp.all", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from any source.", + "UMask": "0x07" + }, + { + "EventName": "de_dis_ops_from_decoder.any_fp_dispatch", + "EventCode": "0xab", + "BriefDescription": "Number of ops dispatched to the floating-point unit.", + "UMask": "0x04" + }, + { + "EventName": "de_dis_ops_from_decoder.any_integer_dispatch", + "EventCode": "0xab", + "BriefDescription": "Number of ops dispatched to the integer execution unit.", + "UMask": "0x08" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.int_phy_reg_file_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to an integer physical register file resource stall.", + "UMask": "0x01" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.load_queue_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a lack of load queue tokens.", + "UMask": "0x02" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.store_queue_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a lack of store queue tokens.", + "UMask": "0x04" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.taken_brnch_buffer_rsrc", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a taken branch buffer resource stall.", + "UMask": "0x10" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.fp_sch_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a floating-point non-schedulable queue token stall.", + "UMask": "0x40" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.al_tokens", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of ALU tokens.", + "UMask": "0x01" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.ag_tokens", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of agen tokens.", + "UMask": "0x02" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.ex_flush_recovery", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a pending integer execution flush recovery.", + "UMask": "0x04" + }, + { + "EventName": 
"de_dispatch_stall_cycle_dynamic_tokens_part2.retq", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of retire queue tokens.", + "UMask": "0x20" + }, + { + "EventName": "de_no_dispatch_per_slot.no_ops_from_frontend", + "EventCode": "0x1a0", + "BriefDescription": "In each cycle counts dispatch slots left empty because the front-end did not supply ops.", + "UMask": "0x01" + }, + { + "EventName": "de_no_dispatch_per_slot.backend_stalls", + "EventCode": "0x1a0", + "BriefDescription": "In each cycle counts ops unable to dispatch because of back-end stalls.", + "UMask": "0x1e" + }, + { + "EventName": "de_no_dispatch_per_slot.smt_contention", + "EventCode": "0x1a0", + "BriefDescription": "In each cycle counts ops unable to dispatch because the dispatch cycle was granted to the other SMT thread.", + "UMask": "0x60" + }, + { + "EventName": "de_additional_resource_stalls.dispatch_stalls", + "EventCode": "0x1a2", + "BriefDescription": "Counts additional cycles where dispatch is stalled due to a lack of dispatch resources.", + "UMask": "0x30" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/execution.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/execution.json new file mode 100644 index 000000000000..5a46d3db74e7 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/execution.json @@ -0,0 +1,174 @@ +[ + { + "EventName": "ex_ret_instr", + "EventCode": "0xc0", + "BriefDescription": "Retired instructions." + }, + { + "EventName": "ex_ret_ops", + "EventCode": "0xc1", + "BriefDescription": "Retired macro-ops." + }, + { + "EventName": "ex_ret_brn", + "EventCode": "0xc2", + "BriefDescription": "Retired branch instructions (all types of architectural control flow changes, including exceptions and interrupts)." + }, + { + "EventName": "ex_ret_brn_misp", + "EventCode": "0xc3", + "BriefDescription": "Retired branch instructions mispredicted." + }, + { + "EventName": "ex_ret_brn_tkn", + "EventCode": "0xc4", + "BriefDescription": "Retired taken branch instructions (all types of architectural control flow changes, including exceptions and interrupts)." + }, + { + "EventName": "ex_ret_brn_tkn_misp", + "EventCode": "0xc5", + "BriefDescription": "Retired taken branch instructions mispredicted." + }, + { + "EventName": "ex_ret_brn_far", + "EventCode": "0xc6", + "BriefDescription": "Retired far control transfers (far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts). Far control transfers are not subject to branch prediction." + }, + { + "EventName": "ex_ret_near_ret", + "EventCode": "0xc8", + "BriefDescription": "Retired near returns (RET or RET Iw)." + }, + { + "EventName": "ex_ret_near_ret_mispred", + "EventCode": "0xc9", + "BriefDescription": "Retired near returns mispredicted. Each misprediction incurs the same penalty as a mispredicted conditional branch instruction." + }, + { + "EventName": "ex_ret_brn_ind_misp", + "EventCode": "0xca", + "BriefDescription": "Retired indirect branch instructions mispredicted (only EX mispredicts). Each misprediction incurs the same penalty as a mispredicted conditional branch instruction." 
+ }, + { + "EventName": "ex_ret_mmx_fp_instr.x87", + "EventCode": "0xcb", + "BriefDescription": "Retired x87 instructions.", + "UMask": "0x01" + }, + { + "EventName": "ex_ret_mmx_fp_instr.mmx", + "EventCode": "0xcb", + "BriefDescription": "Retired MMX instructions.", + "UMask": "0x02" + }, + { + "EventName": "ex_ret_mmx_fp_instr.sse", + "EventCode": "0xcb", + "BriefDescription": "Retired SSE instructions (includes SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42 and AVX).", + "UMask": "0x04" + }, + { + "EventName": "ex_ret_ind_brch_instr", + "EventCode": "0xcc", + "BriefDescription": "Retired indirect branch instructions." + }, + { + "EventName": "ex_ret_cond", + "EventCode": "0xd1", + "BriefDescription": "Retired conditional branch instructions." + }, + { + "EventName": "ex_div_busy", + "EventCode": "0xd3", + "BriefDescription": "Number of cycles the divider is busy." + }, + { + "EventName": "ex_div_count", + "EventCode": "0xd4", + "BriefDescription": "Divide ops executed." + }, + { + "EventName": "ex_no_retire.empty", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire due to the lack of valid ops in the retire queue (may be caused by front-end bottlenecks or pipeline redirects).", + "UMask": "0x01" + }, + { + "EventName": "ex_no_retire.not_complete", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire while the oldest op is waiting to be executed.", + "UMask": "0x02" + }, + { + "EventName": "ex_no_retire.other", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire caused by other reasons (retire breaks, traps, faults, etc.).", + "UMask": "0x08" + }, + { + "EventName": "ex_no_retire.thread_not_selected", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire because thread arbitration did not select the thread.", + "UMask": "0x10" + }, + { + "EventName": "ex_no_retire.load_not_complete", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire while the oldest op is waiting for load data.", + "UMask": "0xa2" + }, + { + "EventName": "ex_no_retire.all", + "EventCode": "0xd6", + "BriefDescription": "Cycles with no retire for any reason.", + "UMask": "0x1b" + }, + { + "EventName": "ex_ret_ucode_instr", + "EventCode": "0x1c1", + "BriefDescription": "Retired microcoded instructions." + }, + { + "EventName": "ex_ret_ucode_ops", + "EventCode": "0x1c2", + "BriefDescription": "Retired microcode ops." + }, + { + "EventName": "ex_ret_msprd_brnch_instr_dir_msmtch", + "EventCode": "0x1c7", + "BriefDescription": "Retired branch instructions mispredicted due to direction mismatch." + }, + { + "EventName": "ex_ret_uncond_brnch_instr_mispred", + "EventCode": "0x1c8", + "BriefDescription": "Retired unconditional indirect branch instructions mispredicted." + }, + { + "EventName": "ex_ret_uncond_brnch_instr", + "EventCode": "0x1c9", + "BriefDescription": "Retired unconditional branch instructions." + }, + { + "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops", + "EventCode": "0x1cf", + "BriefDescription": "Ops tagged by IBS.", + "UMask": "0x01" + }, + { + "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret", + "EventCode": "0x1cf", + "BriefDescription": "Ops tagged by IBS that retired.", + "UMask": "0x02" + }, + { + "EventName": "ex_tagged_ibs_ops.ibs_count_rollover", + "EventCode": "0x1cf", + "BriefDescription": "Ops not tagged by IBS due to a previous tagged op that has not yet signaled interrupt.", + "UMask": "0x04" + }, + { + "EventName": "ex_ret_fused_instr", + "EventCode": "0x1d0", + "BriefDescription": "Retired fused instructions." 
+ } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/floating-point.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/floating-point.json new file mode 100644 index 000000000000..9204bfb1d69e --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/floating-point.json @@ -0,0 +1,812 @@ +[ + { + "EventName": "fp_ret_x87_fp_ops.add_sub_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point add and subtract ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ret_x87_fp_ops.mul_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point multiply ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ret_x87_fp_ops.div_sqrt_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point divide and square root ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ret_x87_fp_ops.all", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point ops of all types.", + "UMask": "0x07" + }, + { + "EventName": "fp_ret_sse_avx_ops.add_sub_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point add and subtract ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ret_sse_avx_ops.mult_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point multiply ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ret_sse_avx_ops.div_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point divide and square root ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ret_sse_avx_ops.mac_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point multiply-accumulate ops (each operation is counted as 2 ops).", + "UMask": "0x08" + }, + { + "EventName": "fp_ret_sse_avx_ops.bfloat16_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point bfloat16 ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ret_sse_avx_ops.scalar_single_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point scalar single-precision ops.", + "UMask": "0x40" + }, + { + "EventName": "fp_ret_sse_avx_ops.packed_single_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point packed single-precision ops.", + "UMask": "0x60" + }, + { + "EventName": "fp_ret_sse_avx_ops.scalar_double_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point scalar double-precision ops.", + "UMask": "0x80" + }, + { + "EventName": "fp_ret_sse_avx_ops.packed_double_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point packed double-precision ops.", + "UMask": "0xa0" + }, + { + "EventName": "fp_ret_sse_avx_ops.all", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX floating-point ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_ops_retired_by_width.x87_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired x87 floating-point ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ops_retired_by_width.mmx_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired MMX floating-point ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ops_retired_by_width.scalar_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired scalar floating-point ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ops_retired_by_width.pack_128_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired packed 128-bit floating-point ops.", + "UMask": "0x08" + }, + { + 
"EventName": "fp_ops_retired_by_width.pack_256_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired packed 256-bit floating-point ops.", + "UMask": "0x10" + }, + { + "EventName": "fp_ops_retired_by_width.pack_512_uops_retired", + "EventCode": "0x08", + "BriefDescription": "Retired packed 512-bit floating-point ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ops_retired_by_width.all", + "EventCode": "0x08", + "BriefDescription": "Retired floating-point ops of all widths.", + "UMask": "0x3f" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_add", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point add ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_sub", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_mul", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_mac", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_div", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point divide ops.", + "UMask": "0x05" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_sqrt", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point square root ops.", + "UMask": "0x06" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_cmp", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point compare ops.", + "UMask": "0x07" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_cvt", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point convert ops.", + "UMask": "0x08" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_blend", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point blend ops.", + "UMask": "0x09" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_other", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_ops_retired_by_type.scalar_all", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_ops_retired_by_type.vector_add", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point add ops.", + "UMask": "0x10" + }, + { + "EventName": "fp_ops_retired_by_type.vector_sub", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ops_retired_by_type.vector_mul", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "fp_ops_retired_by_type.vector_mac", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "fp_ops_retired_by_type.vector_div", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point divide ops.", + "UMask": "0x50" + }, + { + "EventName": "fp_ops_retired_by_type.vector_sqrt", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point square root ops.", + "UMask": "0x60" + }, + { + "EventName": "fp_ops_retired_by_type.vector_cmp", + "EventCode": "0x0a", + "BriefDescription": 
"Retired vector floating-point compare ops.", + "UMask": "0x70" + }, + { + "EventName": "fp_ops_retired_by_type.vector_cvt", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point convert ops.", + "UMask": "0x80" + }, + { + "EventName": "fp_ops_retired_by_type.vector_blend", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point blend ops.", + "UMask": "0x90" + }, + { + "EventName": "fp_ops_retired_by_type.vector_shuffle", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_ops_retired_by_type.vector_logical", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_ops_retired_by_type.vector_other", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_ops_retired_by_type.vector_all", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_ops_retired_by_type.all", + "EventCode": "0x0a", + "BriefDescription": "Retired floating-point ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "sse_avx_ops_retired.mmx_add", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer add.", + "UMask": "0x01" + }, + { + "EventName": "sse_avx_ops_retired.mmx_sub", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "sse_avx_ops_retired.mmx_mul", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "sse_avx_ops_retired.mmx_mac", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "sse_avx_ops_retired.mmx_cmp", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer compare ops.", + "UMask": "0x07" + }, + { + "EventName": "sse_avx_ops_retired.mmx_shift", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer shift ops.", + "UMask": "0x09" + }, + { + "EventName": "sse_avx_ops_retired.mmx_mov", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer MOV ops.", + "UMask": "0x0a" + }, + { + "EventName": "sse_avx_ops_retired.mmx_shuffle", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "sse_avx_ops_retired.mmx_pack", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer pack ops.", + "UMask": "0x0c" + }, + { + "EventName": "sse_avx_ops_retired.mmx_logical", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer logical ops.", + "UMask": "0x0d" + }, + { + "EventName": "sse_avx_ops_retired.mmx_other", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "sse_avx_ops_retired.mmx_all", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_add", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer add ops.", + "UMask": "0x10" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_sub", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_mul", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_mac", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_aes", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer AES ops.", + "UMask": "0x50" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_sha", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer SHA ops.", + "UMask": "0x60" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_cmp", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer compare ops.", + "UMask": "0x70" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_clm", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer CLM ops.", + "UMask": "0x80" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_shift", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer shift ops.", + "UMask": "0x90" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_mov", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer MOV ops.", + "UMask": "0xa0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_shuffle", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g.
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_pack", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer pack ops.", + "UMask": "0xc0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_logical", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_other", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "sse_avx_ops_retired.sse_avx_all", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "sse_avx_ops_retired.all", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE, AVX and MMX integer ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_pack_ops_retired.fp128_add", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point add ops.", + "UMask": "0x01" + }, + { + "EventName": "fp_pack_ops_retired.fp128_sub", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "fp_pack_ops_retired.fp128_mul", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "fp_pack_ops_retired.fp128_mac", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "fp_pack_ops_retired.fp128_div", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point divide ops.", + "UMask": "0x05" + }, + { + "EventName": "fp_pack_ops_retired.fp128_sqrt", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point square root ops.", + "UMask": "0x06" + }, + { + "EventName": "fp_pack_ops_retired.fp128_cmp", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point compare ops.", + "UMask": "0x07" + }, + { + "EventName": "fp_pack_ops_retired.fp128_cvt", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point convert ops.", + "UMask": "0x08" + }, + { + "EventName": "fp_pack_ops_retired.fp128_blend", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point blend ops.", + "UMask": "0x09" + }, + { + "EventName": "fp_pack_ops_retired.fp128_shuffle", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "fp_pack_ops_retired.fp128_logical", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point logical ops.", + "UMask": "0x0d" + }, + { + "EventName": "fp_pack_ops_retired.fp128_other", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_pack_ops_retired.fp128_all", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_pack_ops_retired.fp256_add", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point add ops.", + "UMask": "0x10" + }, + { + "EventName": "fp_pack_ops_retired.fp256_sub", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "fp_pack_ops_retired.fp256_mul", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "fp_pack_ops_retired.fp256_mac", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "fp_pack_ops_retired.fp256_div", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point divide ops.", + "UMask": "0x50" + }, + { + "EventName": "fp_pack_ops_retired.fp256_sqrt", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point square root ops.", + "UMask": "0x60" + }, + { + "EventName": "fp_pack_ops_retired.fp256_cmp", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point compare ops.", + "UMask": "0x70" + }, + { + "EventName": "fp_pack_ops_retired.fp256_cvt", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point convert ops.", + "UMask": "0x80" + }, + { + "EventName": "fp_pack_ops_retired.fp256_blend", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point blend ops.", + "UMask": "0x90" + }, + { + "EventName": "fp_pack_ops_retired.fp256_shuffle", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_pack_ops_retired.fp256_logical", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_pack_ops_retired.fp256_other", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_pack_ops_retired.fp256_all", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_pack_ops_retired.all", + "EventCode": "0x0c", + "BriefDescription": "Retired packed floating-point ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "packed_int_op_type.int128_add", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer add ops.", + "UMask": "0x01" + }, + { + "EventName": "packed_int_op_type.int128_sub", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer subtract ops.", + "UMask": "0x02" + }, + { + "EventName": "packed_int_op_type.int128_mul", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer multiply ops.", + "UMask": "0x03" + }, + { + "EventName": "packed_int_op_type.int128_mac", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer multiply-accumulate ops.", + "UMask": "0x04" + }, + { + "EventName": "packed_int_op_type.int128_aes", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer AES ops.", + "UMask": "0x05" + }, + { + "EventName": "packed_int_op_type.int128_sha", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer SHA ops.", + "UMask": "0x06" + }, + { + "EventName": "packed_int_op_type.int128_cmp", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer compare ops.", + "UMask": "0x07" + }, + { + "EventName": "packed_int_op_type.int128_clm", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer CLM ops.", + "UMask": "0x08" + }, + { + "EventName": "packed_int_op_type.int128_shift", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer shift ops.", + "UMask": "0x09" + }, + { + "EventName": "packed_int_op_type.int128_mov", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer MOV ops.", + "UMask": "0x0a" + }, + { + "EventName": "packed_int_op_type.int128_shuffle", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "packed_int_op_type.int128_pack", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer pack ops.", + "UMask": "0x0c" + }, + { + "EventName": "packed_int_op_type.int128_logical", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer logical ops.", + "UMask": "0x0d" + }, + { + "EventName": "packed_int_op_type.int128_other", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer ops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "packed_int_op_type.int128_all", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer ops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "packed_int_op_type.int256_add", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer add ops.", + "UMask": "0x10" + }, + { + "EventName": "packed_int_op_type.int256_sub", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer subtract ops.", + "UMask": "0x20" + }, + { + "EventName": "packed_int_op_type.int256_mul", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer multiply ops.", + "UMask": "0x30" + }, + { + "EventName": "packed_int_op_type.int256_mac", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer multiply-accumulate ops.", + "UMask": "0x40" + }, + { + "EventName": "packed_int_op_type.int256_cmp", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer compare ops.", + "UMask": "0x70" + }, + { + "EventName": "packed_int_op_type.int256_shift", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer shift ops.", + "UMask": "0x90" + }, + { + "EventName": "packed_int_op_type.int256_mov", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer MOV ops.", + "UMask": "0xa0" + }, + { + "EventName": "packed_int_op_type.int256_shuffle", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "packed_int_op_type.int256_pack", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer pack ops.", + "UMask": "0xc0" + }, + { + "EventName": "packed_int_op_type.int256_logical", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer logical ops.", + "UMask": "0xd0" + }, + { + "EventName": "packed_int_op_type.int256_other", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer ops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "packed_int_op_type.int256_all", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer ops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "packed_int_op_type.all", + "EventCode": "0x0d", + "BriefDescription": "Retired packed integer ops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_disp_faults.x87_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for x87 fills.", + "UMask": "0x01" + }, + { + "EventName": "fp_disp_faults.xmm_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for XMM fills.", + "UMask": "0x02" + }, + { + "EventName": "fp_disp_faults.ymm_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for YMM fills.", + "UMask": "0x04" + }, + { + "EventName": "fp_disp_faults.ymm_spill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for YMM spills.", + "UMask": "0x08" + }, + { + "EventName": "fp_disp_faults.sse_avx_all", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults of all types for SSE and AVX ops.", + "UMask": "0x0e" + }, + { + "EventName": "fp_disp_faults.all", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults of all types.", + "UMask": "0x0f" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/inst-cache.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/inst-cache.json new file mode 100644 index 000000000000..ad75e5bf9513 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/inst-cache.json @@ -0,0 +1,72 @@ +[ + { + "EventName": "ic_cache_fill_l2", + "EventCode": "0x82", + "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from the L2 cache." + }, + { + "EventName": "ic_cache_fill_sys", + "EventCode": "0x83", + "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from system memory or another cache." + }, + { + "EventName": "ic_fetch_ibs_events.fetch_tagged", + "EventCode": "0x188", + "BriefDescription": "Fetches tagged by Fetch IBS. 
Not all tagged fetches result in a valid sample and an IBS interrupt.", + "UMask": "0x02" + }, + { + "EventName": "ic_fetch_ibs_events.sample_discarded", + "EventCode": "0x188", + "BriefDescription": "Fetches discarded after being tagged by Fetch IBS due to reasons other than IBS filtering.", + "UMask": "0x04" + }, + { + "EventName": "ic_fetch_ibs_events.sample_filtered", + "EventCode": "0x188", + "BriefDescription": "Fetches discarded after being tagged by Fetch IBS due to IBS filtering.", + "UMask": "0x08" + }, + { + "EventName": "ic_fetch_ibs_events.sample_valid", + "EventCode": "0x188", + "BriefDescription": "Fetches tagged by Fetch IBS that result in a valid sample and an IBS interrupt.", + "UMask": "0x10" + }, + { + "EventName": "ic_tag_hit_miss.instruction_cache_hit", + "EventCode": "0x18e", + "BriefDescription": "Instruction cache hits.", + "UMask": "0x07" + }, + { + "EventName": "ic_tag_hit_miss.instruction_cache_miss", + "EventCode": "0x18e", + "BriefDescription": "Instruction cache misses.", + "UMask": "0x18" + }, + { + "EventName": "ic_tag_hit_miss.all_instruction_cache_accesses", + "EventCode": "0x18e", + "BriefDescription": "Instruction cache accesses of all types.", + "UMask": "0x1f" + }, + { + "EventName": "op_cache_hit_miss.op_cache_hit", + "EventCode": "0x28f", + "BriefDescription": "Op cache hits.", + "UMask": "0x03" + }, + { + "EventName": "op_cache_hit_miss.op_cache_miss", + "EventCode": "0x28f", + "BriefDescription": "Op cache misses.", + "UMask": "0x04" + }, + { + "EventName": "op_cache_hit_miss.all_op_cache_accesses", + "EventCode": "0x28f", + "BriefDescription": "Op cache accesses of all types.", + "UMask": "0x07" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/l2-cache.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/l2-cache.json new file mode 100644 index 000000000000..d1de51a02922 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/l2-cache.json @@ -0,0 +1,266 @@ +[ + { + "EventName": "l2_request_g1.group2", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of non-cacheable type (non-cached data and instructions reads, self-modifying code checks).", + "UMask": "0x01" + }, + { + "EventName": "l2_request_g1.l2_hw_pf", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: from hardware prefetchers to prefetch directly into L2 (hit or miss).", + "UMask": "0x02" + }, + { + "EventName": "l2_request_g1.prefetch_l2_cmd", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: prefetch directly into L2.", + "UMask": "0x04" + }, + { + "EventName": "l2_request_g1.cacheable_ic_read", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: instruction cache reads.", + "UMask": "0x10" + }, + { + "EventName": "l2_request_g1.ls_rd_blk_c_s", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: data cache shared reads.", + "UMask": "0x20" + }, + { + "EventName": "l2_request_g1.rd_blk_x", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: data cache stores.", + "UMask": "0x40" + }, + { + "EventName": "l2_request_g1.rd_blk_l", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests: data cache reads including hardware and software prefetch.", + "UMask": "0x80" + }, + { + "EventName": "l2_request_g1.all_dc", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of common types from L1 data cache (including prefetches).", + "UMask": "0xe0" + }, + { + "EventName": "l2_request_g1.all_no_prefetch", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of common types not 
including prefetches.", + "UMask": "0xf1" + }, + { + "EventName": "l2_request_g1.all", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of all types.", + "UMask": "0xf7" + }, + { + "EventName": "l2_request_g2.ls_rd_sized_nc", + "EventCode": "0x61", + "BriefDescription": "L2 cache requests: non-coherent, non-cacheable LS sized reads.", + "UMask": "0x20" + }, + { + "EventName": "l2_request_g2.ls_rd_sized", + "EventCode": "0x61", + "BriefDescription": "L2 cache requests: coherent, non-cacheable LS sized reads.", + "UMask": "0x40" + }, + { + "EventName": "l2_wcb_req.wcb_close", + "EventCode": "0x63", + "BriefDescription": "Write Combining Buffer (WCB) closures.", + "UMask": "0x20" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_miss", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache request miss in L2.", + "UMask": "0x01" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_hit_s", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache hit non-modifiable line in L2.", + "UMask": "0x02" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_hit_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache hit modifiable line in L2.", + "UMask": "0x04" + }, + { + "EventName": "l2_cache_req_stat.ic_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for instruction cache hits.", + "UMask": "0x06" + }, + { + "EventName": "l2_cache_req_stat.ic_access_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for instruction cache access.", + "UMask": "0x07" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_c", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache request miss in L2.", + "UMask": "0x08" + }, + { + "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data and instruction cache misses.", + "UMask": "0x09" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache store or state change hit in L2.", + "UMask": "0x10" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache read hit non-modifiable line in L2.", + "UMask": "0x20" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache read hit modifiable line in L2.", + "UMask": "0x40" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_cs", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache shared read hit in L2.", + "UMask": "0x80" + }, + { + "EventName": "l2_cache_req_stat.dc_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data cache hits.", + "UMask": "0xf0" + }, + { + "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including 
L2 prefetch) for data and instruction cache hits.", + "UMask": "0xf6" + }, + { + "EventName": "l2_cache_req_stat.dc_access_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data cache access.", + "UMask": "0xf8" + }, + { + "EventName": "l2_cache_req_stat.all", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data and instruction cache access.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_hit_l2.l2_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_hit_l2.l1_dc_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_hit_l2.l1_dc_l2_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l2_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l1_dc_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_miss_l2_l3.l2_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_miss_l2_l3.l1_dc_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_miss_l2_l3.l1_dc_l2_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_fill_rsp_src.local_ccx", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "l2_fill_rsp_src.near_cache", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "l2_fill_rsp_src.dram_io_near", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "l2_fill_rsp_src.far_cache", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from cache of another CCX when the address was in a different NUMA node.", + 
"UMask": "0x10" + }, + { + "EventName": "l2_fill_rsp_src.dram_io_far", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "l2_fill_rsp_src.alternate_memories", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "l2_fill_rsp_src.all", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills from all types of data sources.", + "UMask": "0xde" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/l3-cache.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/l3-cache.json new file mode 100644 index 000000000000..b50fe14d4520 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/l3-cache.json @@ -0,0 +1,177 @@ +[ + { + "EventName": "l3_lookup_state.l3_miss", + "EventCode": "0x04", + "BriefDescription": "L3 cache misses.", + "UMask": "0x01", + "Unit": "L3PMC" + }, + { + "EventName": "l3_lookup_state.l3_hit", + "EventCode": "0x04", + "BriefDescription": "L3 cache hits.", + "UMask": "0xfe", + "Unit": "L3PMC" + }, + { + "EventName": "l3_lookup_state.all_coherent_accesses_to_l3", + "EventCode": "0x04", + "BriefDescription": "L3 cache requests for all coherent accesses.", + "UMask": "0xff", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.dram_near", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from DRAM in the same NUMA node.", + "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.dram_far", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from DRAM in a different NUMA node.", + "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.near_cache", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in the same NUMA node.", + "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.far_cache", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in a different NUMA node.", + "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.ext_near", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in the same NUMA node.", + "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.ext_far", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in a different NUMA node.", + "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.all", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency from all data sources.", + "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", 
+ "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.dram_near", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from DRAM in the same NUMA node.", + "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.dram_far", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from DRAM in a different NUMA node.", + "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.near_cache", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in the same NUMA node.", + "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.far_cache", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in a different NUMA node.", + "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.ext_near", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in the same NUMA node.", + "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.ext_far", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in a different NUMA node.", + "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.all", + "EventCode": "0xad", + "BriefDescription": "L3 cache fill requests sourced from all data sources.", + "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/load-store.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/load-store.json new file mode 100644 index 000000000000..ff6627a77805 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/load-store.json @@ -0,0 +1,517 @@ +[ + { + "EventName": "ls_bad_status2.stli_other", + "EventCode": "0x24", + "BriefDescription": "Store-to-load conflicts (load unable to complete due to a non-forwardable conflict with an older store).", + "UMask": "0x02" + }, + { + "EventName": "ls_locks.bus_lock", + "EventCode": "0x25", + "BriefDescription": "Retired Lock instructions which caused a bus lock.", + "UMask": "0x01" + }, + { + "EventName": "ls_ret_cl_flush", + "EventCode": "0x26", + "BriefDescription": "Retired CLFLUSH instructions." + }, + { + "EventName": "ls_ret_cpuid", + "EventCode": "0x27", + "BriefDescription": "Retired CPUID instructions." 
+ }, + { + "EventName": "ls_dispatch.ld_dispatch", + "EventCode": "0x29", + "BriefDescription": "Number of memory load operations dispatched to the load-store unit.", + "UMask": "0x01" + }, + { + "EventName": "ls_dispatch.store_dispatch", + "EventCode": "0x29", + "BriefDescription": "Number of memory store operations dispatched to the load-store unit.", + "UMask": "0x02" + }, + { + "EventName": "ls_dispatch.ld_st_dispatch", + "EventCode": "0x29", + "BriefDescription": "Number of memory load-store operations dispatched to the load-store unit.", + "UMask": "0x04" + }, + { + "EventName": "ls_dispatch.all", + "EventCode": "0x29", + "BriefDescription": "Number of memory operations dispatched to the load-store unit.", + "UMask": "0x07" + }, + { + "EventName": "ls_smi_rx", + "EventCode": "0x2b", + "BriefDescription": "SMIs received." + }, + { + "EventName": "ls_int_taken", + "EventCode": "0x2c", + "BriefDescription": "Interrupts taken." + }, + { + "EventName": "ls_stlf", + "EventCode": "0x35", + "BriefDescription": "Store-to-load-forward (STLF) hits." + }, + { + "EventName": "ls_st_commit_cancel2.st_commit_cancel_wcb_full", + "EventCode": "0x37", + "BriefDescription": "Non-cacheable store commits cancelled due to the non-cacheable commit buffer being full.", + "UMask": "0x01" + }, + { + "EventName": "ls_mab_alloc.load_store_allocations", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for load-store allocations.", + "UMask": "0x3f" + }, + { + "EventName": "ls_mab_alloc.hardware_prefetcher_allocations", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for hardware prefetcher allocations.", + "UMask": "0x40" + }, + { + "EventName": "ls_mab_alloc.all_allocations", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for all types of allocations.", + "UMask": "0x7f" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_l2", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_ccx", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_dmnd_fills_from_sys.near_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_near", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_dmnd_fills_from_sys.far_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from cache of another CCX when the address was in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_dmnd_fills_from_sys.remote_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from cache of another CCX when the address was in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_far", + "EventCode": "0x43", + 
"BriefDescription": "Demand data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from either DRAM or MMIO in the same or a different NUMA node (same or different socket).", + "UMask": "0x48" + }, + { + "EventName": "ls_dmnd_fills_from_sys.far_all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from either cache of another CCX, DRAM or MMIO when the address was in a different NUMA node (same or different socket).", + "UMask": "0x50" + }, + { + "EventName": "ls_dmnd_fills_from_sys.alternate_memories", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_dmnd_fills_from_sys.all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills from all types of data sources.", + "UMask": "0xff" + }, + { + "EventName": "ls_any_fills_from_sys.local_l2", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_any_fills_from_sys.local_ccx", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_any_fills_from_sys.local_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from local L2 cache or L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_any_fills_from_sys.near_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_near", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_any_fills_from_sys.far_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from cache of another CCX when the address was in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_any_fills_from_sys.remote_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from cache of another CCX when the address was in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_far", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either DRAM or MMIO in any NUMA node (same or different socket).", + "UMask": "0x48" + }, + { + "EventName": "ls_any_fills_from_sys.far_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from either cache of another CCX, DRAM or MMIO when the address was in a different NUMA node (same or different socket).", + "UMask": "0x50" + }, + { + "EventName": "ls_any_fills_from_sys.alternate_memories", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_any_fills_from_sys.all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills from all types of data sources.", + "UMask": "0xff" + }, + { + "EventName": 
"ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x02" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 2M pages.", + "UMask": "0x04" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 1G pages.", + "UMask": "0x08" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 4k pages.", + "UMask": "0x10" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.", + "UMask": "0x20" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 2M pages.", + "UMask": "0x40" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 1G pages.", + "UMask": "0x80" + }, + { + "EventName": "ls_l1_d_tlb_miss.all_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for all page sizes.", + "UMask": "0xf0" + }, + { + "EventName": "ls_l1_d_tlb_miss.all", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses for all page sizes.", + "UMask": "0xff" + }, + { + "EventName": "ls_misal_loads.ma64", + "EventCode": "0x47", + "BriefDescription": "64B misaligned (cacheline crossing) loads.", + "UMask": "0x01" + }, + { + "EventName": "ls_misal_loads.ma4k", + "EventCode": "0x47", + "BriefDescription": "4kB misaligned (page crossing) loads.", + "UMask": "0x02" + }, + { + "EventName": "ls_pref_instr_disp.prefetch", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchT0 (move data to all cache levels), T1 (move data to all cache levels except L1) and T2 (move data to all cache levels except L1 and L2).", + "UMask": "0x01" + }, + { + "EventName": "ls_pref_instr_disp.prefetch_w", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchW (move data to L1 cache and mark it modifiable).", + "UMask": "0x02" + }, + { + "EventName": "ls_pref_instr_disp.prefetch_nta", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchNTA (move data with minimum cache pollution i.e. 
non-temporal access).", + "UMask": "0x04" + }, + { + "EventName": "ls_pref_instr_disp.all", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of all types.", + "UMask": "0x07" + }, + { + "EventName": "wcb_close.full_line_64b", + "EventCode": "0x50", + "BriefDescription": "Number of events that caused a Write Combining Buffer (WCB) entry to close because all 64 bytes of the entry have been written to.", + "UMask": "0x01" + }, + { + "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a data cache hit.", + "UMask": "0x01" + }, + { + "EventName": "ls_inef_sw_pref.mab_mch_cnt", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a match on an already allocated Miss Address Buffer (MAB).", + "UMask": "0x02" + }, + { + "EventName": "ls_inef_sw_pref.all", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core for any reason.", + "UMask": "0x03" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_l2", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_ccx", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_sw_pf_dc_fills.near_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_near", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_sw_pf_dc_fills.far_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_sw_pf_dc_fills.remote_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from cache of another CCX when the address was in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_far", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from either DRAM or MMIO in the same or a different NUMA node (same or different socket).", + "UMask": "0x48" + }, + { + "EventName": "ls_sw_pf_dc_fills.far_all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from either cache of another CCX, DRAM or MMIO when the address was in a different NUMA node (same or different socket).", + "UMask": "0x50" + }, + { + "EventName": "ls_sw_pf_dc_fills.alternate_memories", + "EventCode": "0x59", +
"BriefDescription": "Software prefetch data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_sw_pf_dc_fills.all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills from all types of data sources.", + "UMask": "0xdf" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_l2", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_ccx", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_hw_pf_dc_fills.near_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from cache of another CCX when the address was in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_near", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_hw_pf_dc_fills.far_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from cache of another CCX when the address was in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_hw_pf_dc_fills.remote_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from cache of another CCX when the address was in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_far", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).", + "UMask": "0x40" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from either DRAM or MMIO in the same or a different NUMA node (same or different socket).", + "UMask": "0x48" + }, + { + "EventName": "ls_hw_pf_dc_fills.far_all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from either cache of another CCX, DRAM or MMIO when the address was in a different NUMA node (same or different socket).", + "UMask": "0x50" + }, + { + "EventName": "ls_hw_pf_dc_fills.alternate_memories", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from extension memory.", + "UMask": "0x80" + }, + { + "EventName": "ls_hw_pf_dc_fills.all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills from all types of data sources.", + "UMask": "0xdf" + }, + { + "EventName": "ls_alloc_mab_count", + "EventCode": "0x5f", + "BriefDescription": "In-flight L1 data cache misses i.e. Miss Address Buffer (MAB) allocations each cycle." + }, + { + "EventName": "ls_not_halted_cyc", + "EventCode": "0x76", + "BriefDescription": "Core cycles not in halt." 
+ }, + { + "EventName": "ls_tlb_flush.all", + "EventCode": "0x78", + "BriefDescription": "All TLB Flushes.", + "UMask": "0xff" + }, + { + "EventName": "ls_not_halted_p0_cyc.p0_freq_cyc", + "EventCode": "0x120", + "BriefDescription": "Reference cycles (P0 frequency) not in halt.", + "UMask": "0x1" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/memory-controller.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/memory-controller.json new file mode 100644 index 000000000000..1a629fc9474a --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/memory-controller.json @@ -0,0 +1,101 @@ +[ + { + "EventName": "umc_mem_clk", + "PublicDescription": "Number of memory clock (MEMCLK) cycles.", + "EventCode": "0x00", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.all", + "PublicDescription": "Number of ACTIVATE commands sent.", + "EventCode": "0x05", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.rd", + "PublicDescription": "Number of ACTIVATE commands sent for reads.", + "EventCode": "0x05", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.wr", + "PublicDescription": "Number of ACTIVATE commands sent for writes.", + "EventCode": "0x05", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.all", + "PublicDescription": "Number of PRECHARGE commands sent.", + "EventCode": "0x06", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.rd", + "PublicDescription": "Number of PRECHARGE commands sent for reads.", + "EventCode": "0x06", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.wr", + "PublicDescription": "Number of PRECHARGE commands sent for writes.", + "EventCode": "0x06", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.all", + "PublicDescription": "Number of CAS commands sent.", + "EventCode": "0x0a", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.rd", + "PublicDescription": "Number of CAS commands sent for reads.", + "EventCode": "0x0a", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.wr", + "PublicDescription": "Number of CAS commands sent for writes.", + "EventCode": "0x0a", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.all", + "PublicDescription": "Number of clock cycles used by the data bus.", + "EventCode": "0x14", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.rd", + "PublicDescription": "Number of clock cycles used by the data bus for reads.", + "EventCode": "0x14", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.wr", + "PublicDescription": "Number of clock cycles used by the data bus for writes.", + "EventCode": "0x14", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/pipeline.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/pipeline.json new file mode 100644 index 000000000000..d860bf599cf2 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/pipeline.json @@ -0,0 +1,99 @@ +[ + { + "MetricName": "total_dispatch_slots", + "BriefDescription": "Total dispatch slots (up to 8 instructions can be dispatched in each cycle).", + "MetricExpr": "8 * ls_not_halted_cyc", + "ScaleUnit": "1slots" + }, + { + "MetricName": "frontend_bound", + "BriefDescription":
"Percentage of dispatch slots that remained unused because the frontend did not supply enough instructions/ops.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "bad_speculation", + "BriefDescription": "Percentage of dispatched ops that did not retire.", + "MetricExpr": "d_ratio(de_src_op_disp.all - ex_ret_ops, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "backend_bound", + "BriefDescription": "Percentage of dispatch slots that remained unused because of backend stalls.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.backend_stalls, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "smt_contention", + "BriefDescription": "Percentage of dispatch slots that remained unused because the other thread was selected.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.smt_contention, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring", + "BriefDescription": "Percentage of dispatch slots used by ops that retired.", + "MetricExpr": "d_ratio(ex_ret_ops, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "frontend_bound_by_latency", + "BriefDescription": "Percentage of dispatch slots that remained unused because of a latency bottleneck in the frontend (such as instruction cache or TLB misses).", + "MetricExpr": "d_ratio((8 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x8@), total_dispatch_slots)", + "MetricGroup": "PipelineL2;frontend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "frontend_bound_by_bandwidth", + "BriefDescription": "Percentage of dispatch slots that remained unused because of a bandwidth bottleneck in the frontend (such as decode or op cache fetch bandwidth).", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend - (8 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x8@), total_dispatch_slots)", + "MetricGroup": "PipelineL2;frontend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "bad_speculation_from_mispredicts", + "BriefDescription": "Percentage of dispatched ops that were flushed due to branch mispredicts.", + "MetricExpr": "d_ratio(bad_speculation * ex_ret_brn_misp, ex_ret_brn_misp + bp_redirects.resync)", + "MetricGroup": "PipelineL2;bad_speculation_group", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "bad_speculation_from_pipeline_restarts", + "BriefDescription": "Percentage of dispatched ops that were flushed due to pipeline restarts (resyncs).", + "MetricExpr": "d_ratio(bad_speculation * bp_redirects.resync, ex_ret_brn_misp + bp_redirects.resync)", + "MetricGroup": "PipelineL2;bad_speculation_group", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "backend_bound_by_memory", + "BriefDescription": "Percentage of dispatch slots that remained unused because of stalls due to the memory subsystem.", + "MetricExpr": "backend_bound * d_ratio(ex_no_retire.load_not_complete, ex_no_retire.not_complete)", + "MetricGroup": "PipelineL2;backend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "backend_bound_by_cpu", + "BriefDescription": "Percentage of dispatch slots that remained unused because of stalls not related to the memory subsystem.", + "MetricExpr": "backend_bound * (1 - d_ratio(ex_no_retire.load_not_complete, 
ex_no_retire.not_complete))", + "MetricGroup": "PipelineL2;backend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring_from_fastpath", + "BriefDescription": "Percentage of dispatch slots used by fastpath ops that retired.", + "MetricExpr": "retiring * (1 - d_ratio(ex_ret_ucode_ops, ex_ret_ops))", + "MetricGroup": "PipelineL2;retiring_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring_from_microcode", + "BriefDescription": "Percentage of dispatch slots used by microcode ops that retired.", + "MetricExpr": "retiring * d_ratio(ex_ret_ucode_ops, ex_ret_ops)", + "MetricGroup": "PipelineL2;retiring_group", + "ScaleUnit": "100%slots" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/amdzen5/recommended.json b/lib/libpmc/pmu-events/arch/x86/amdzen5/recommended.json new file mode 100644 index 000000000000..635d57e3bc15 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/amdzen5/recommended.json @@ -0,0 +1,457 @@ +[ + { + "MetricName": "branch_misprediction_rate", + "BriefDescription": "Execution-time branch misprediction rate (non-speculative).", + "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)", + "MetricGroup": "branch_prediction", + "ScaleUnit": "1per_branch" + }, + { + "MetricName": "all_data_cache_accesses_pti", + "BriefDescription": "All data cache accesses per thousand instructions.", + "MetricExpr": "ls_dispatch.all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_accesses_pti", + "BriefDescription": "All L2 cache accesses per thousand instructions.", + "MetricExpr": "(l2_request_g1.all_no_prefetch + l2_pf_hit_l2.l2_hwpf + l2_pf_miss_l2_hit_l3.l2_hwpf + l2_pf_miss_l2_l3.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l1_ic_misses_pti", + "BriefDescription": "L2 cache accesses from L1 instruction cache misses (including prefetch) per thousand instructions.", + "MetricExpr": "l2_request_g1.cacheable_ic_read / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l1_dc_misses_pti", + "BriefDescription": "L2 cache accesses from L1 data cache misses (including prefetch) per thousand instructions.", + "MetricExpr": "l2_request_g1.all_dc / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l2_hwpf_pti", + "BriefDescription": "L2 cache accesses from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "(l2_pf_hit_l2.l1_dc_l2_hwpf + l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf + l2_pf_miss_l2_l3.l1_dc_l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_misses_pti", + "BriefDescription": "All L2 cache misses per thousand instructions.", + "MetricExpr": "(l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3.l2_hwpf + l2_pf_miss_l2_l3.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l1_ic_miss_pti", + "BriefDescription": "L2 cache misses from L1 instruction cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ic_fill_miss / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l1_dc_miss_pti", + "BriefDescription": "L2 cache misses from L1 data cache misses per thousand instructions.", + 
"MetricExpr": "l2_cache_req_stat.ls_rd_blk_c / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l2_hwpf_pti", + "BriefDescription": "L2 cache misses from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "(l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf + l2_pf_miss_l2_l3.l1_dc_l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_hits_pti", + "BriefDescription": "All L2 cache hits per thousand instructions.", + "MetricExpr": "(l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l1_ic_miss_pti", + "BriefDescription": "L2 cache hits from L1 instruction cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ic_hit_in_l2 / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l1_dc_miss_pti", + "BriefDescription": "L2 cache hits from L1 data cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.dc_hit_in_l2 / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l2_hwpf_pti", + "BriefDescription": "L2 cache hits from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "l2_pf_hit_l2.l1_dc_l2_hwpf / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l3_cache_accesses", + "BriefDescription": "L3 cache accesses.", + "MetricExpr": "l3_lookup_state.all_coherent_accesses_to_l3", + "MetricGroup": "l3_cache" + }, + { + "MetricName": "l3_misses", + "BriefDescription": "L3 misses (including cacheline state change requests).", + "MetricExpr": "l3_lookup_state.l3_miss", + "MetricGroup": "l3_cache" + }, + { + "MetricName": "l3_read_miss_latency", + "BriefDescription": "Average L3 read miss latency (in core clocks).", + "MetricExpr": "(l3_xi_sampled_latency.all * 10) / l3_xi_sampled_latency_requests.all", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "l3_read_miss_latency_for_local_dram", + "BriefDescription": "Average L3 read miss latency (in core clocks) for local DRAM.", + "MetricExpr": "(l3_xi_sampled_latency.dram_near * 10) / l3_xi_sampled_latency_requests.dram_near", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "l3_read_miss_latency_for_remote_dram", + "BriefDescription": "Average L3 read miss latency (in core clocks) for remote DRAM.", + "MetricExpr": "(l3_xi_sampled_latency.dram_far * 10) / l3_xi_sampled_latency_requests.dram_far", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "op_cache_fetch_miss_ratio", + "BriefDescription": "Op cache miss ratio for all fetches.", + "MetricExpr": "d_ratio(op_cache_hit_miss.op_cache_miss, op_cache_hit_miss.all_op_cache_accesses)", + "ScaleUnit": "100%" + }, + { + "MetricName": "ic_fetch_miss_ratio", + "BriefDescription": "Instruction cache miss ratio for all fetches. 
An instruction cache miss will not be counted by this metric if it is an OC hit.", + "MetricExpr": "d_ratio(ic_tag_hit_miss.instruction_cache_miss, ic_tag_hit_miss.all_instruction_cache_accesses)", + "ScaleUnit": "100%" + }, + { + "MetricName": "l1_data_cache_fills_from_memory_pti", + "BriefDescription": "L1 data cache fills from DRAM or MMIO in any NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.dram_io_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_remote_node_pti", + "BriefDescription": "L1 data cache fills from a different NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.far_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_same_ccx_pti", + "BriefDescription": "L1 data cache fills from within the same CCX per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.local_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_different_ccx_pti", + "BriefDescription": "L1 data cache fills from another CCX cache in any NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.remote_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l1_data_cache_fills_pti", + "BriefDescription": "All L1 data cache fills per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_local_l2_pti", + "BriefDescription": "L1 demand data cache fills from local L2 cache per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.local_l2 / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_same_ccx_pti", + "BriefDescription": "L1 demand data cache fills from within the same CCX per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.local_ccx / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_near_cache_pti", + "BriefDescription": "L1 demand data cache fills from another CCX cache in the same NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.near_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_near_memory_pti", + "BriefDescription": "L1 demand data cache fills from DRAM or MMIO in the same NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_near / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_far_cache_pti", + "BriefDescription": "L1 demand data cache fills from another CCX cache in a different NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.far_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_far_memory_pti", + "BriefDescription": "L1 demand data cache fills from DRAM or MMIO in a different NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_far / instructions", + 
"MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_itlb_misses_pti", + "BriefDescription": "L1 instruction TLB misses per thousand instructions.", + "MetricExpr": "(bp_l1_tlb_miss_l2_tlb_hit + bp_l1_tlb_miss_l2_tlb_miss.all) / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_itlb_misses_pti", + "BriefDescription": "L2 instruction TLB misses and instruction page walks per thousand instructions.", + "MetricExpr": "bp_l1_tlb_miss_l2_tlb_miss.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_dtlb_misses_pti", + "BriefDescription": "L1 data TLB misses per thousand instructions.", + "MetricExpr": "ls_l1_d_tlb_miss.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_dtlb_misses_pti", + "BriefDescription": "L2 data TLB misses and data page walks per thousand instructions.", + "MetricExpr": "ls_l1_d_tlb_miss.all_l2_miss / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_tlbs_flushed_pti", + "BriefDescription": "All TLBs flushed per thousand instructions.", + "MetricExpr": "ls_tlb_flush.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "macro_ops_dispatched", + "BriefDescription": "Macro-ops dispatched.", + "MetricExpr": "de_src_op_disp.all", + "MetricGroup": "decoder" + }, + { + "MetricName": "sse_avx_stalls", + "BriefDescription": "Mixed SSE/AVX stalls.", + "MetricExpr": "fp_disp_faults.sse_avx_all" + }, + { + "MetricName": "macro_ops_retired", + "BriefDescription": "Macro-ops retired.", + "MetricExpr": "ex_ret_ops" + }, + { + "MetricName": "umc_data_bus_utilization", + "BriefDescription": "Memory controller data bus utilization.", + "MetricExpr": "d_ratio(umc_data_slot_clks.all / 2, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_rate", + "BriefDescription": "Memory controller CAS command rate.", + "MetricExpr": "d_ratio(umc_cas_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + }, + { + "MetricName": "umc_cas_cmd_read_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for reads.", + "MetricExpr": "d_ratio(umc_cas_cmd.rd, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_write_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for writes.", + "MetricExpr": "d_ratio(umc_cas_cmd.wr, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_mem_read_bandwidth", + "BriefDescription": "Estimated memory read bandwidth.", + "MetricExpr": "(umc_cas_cmd.rd * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_write_bandwidth", + "BriefDescription": "Estimated memory write bandwidth.", + "MetricExpr": "(umc_cas_cmd.wr * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_bandwidth", + "BriefDescription": "Estimated combined memory bandwidth.", + "MetricExpr": "(umc_cas_cmd.all * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + 
"MetricName": "umc_activate_cmd_rate", + "BriefDescription": "Memory controller ACTIVATE command rate.", + "MetricExpr": "d_ratio(umc_act_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + }, + { + "MetricName": "umc_precharge_cmd_rate", + "BriefDescription": "Memory controller PRECHARGE command rate.", + "MetricExpr": "d_ratio(umc_pchg_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + }, + { + "MetricName": "dram_read_bandwidth_for_local_or_remote_socket", + "BriefDescription": "DRAM read data bandwidth for accesses in local or remote socket.", + "MetricExpr": "(local_or_remote_socket_read_data_beats_dram_0 + local_or_remote_socket_read_data_beats_dram_1 + local_or_remote_socket_read_data_beats_dram_2 + local_or_remote_socket_read_data_beats_dram_3 + local_or_remote_socket_read_data_beats_dram_4 + local_or_remote_socket_read_data_beats_dram_5 + local_or_remote_socket_read_data_beats_dram_6 + local_or_remote_socket_read_data_beats_dram_7 + local_or_remote_socket_read_data_beats_dram_8 + local_or_remote_socket_read_data_beats_dram_9 + local_or_remote_socket_read_data_beats_dram_10 + local_or_remote_socket_read_data_beats_dram_11) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "dram_write_bandwidth_for_local_socket", + "BriefDescription": "DRAM write data bandwidth for accesses in local socket.", + "MetricExpr": "(local_socket_write_data_beats_dram_0 + local_socket_write_data_beats_dram_1 + local_socket_write_data_beats_dram_2 + local_socket_write_data_beats_dram_3 + local_socket_write_data_beats_dram_4 + local_socket_write_data_beats_dram_5 + local_socket_write_data_beats_dram_6 + local_socket_write_data_beats_dram_7 + local_socket_write_data_beats_dram_8 + local_socket_write_data_beats_dram_9 + local_socket_write_data_beats_dram_10 + local_socket_write_data_beats_dram_11) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "dram_write_bandwidth_for_remote_socket", + "BriefDescription": "DRAM write data bandwidth for accesses in remote socket.", + "MetricExpr": "(remote_socket_write_data_beats_dram_0 + remote_socket_write_data_beats_dram_1 + remote_socket_write_data_beats_dram_2 + remote_socket_write_data_beats_dram_3 + remote_socket_write_data_beats_dram_4 + remote_socket_write_data_beats_dram_5 + remote_socket_write_data_beats_dram_6 + remote_socket_write_data_beats_dram_7 + remote_socket_write_data_beats_dram_8 + remote_socket_write_data_beats_dram_9 + remote_socket_write_data_beats_dram_10 + remote_socket_write_data_beats_dram_11) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "dram_write_bandwidth_for_local_or_remote_socket", + "BriefDescription": "DRAM write data bandwidth for accesses in local or remote socket.", + "MetricExpr": "(local_or_remote_socket_write_data_beats_dram_0 + local_or_remote_socket_write_data_beats_dram_1 + local_or_remote_socket_write_data_beats_dram_2 + local_or_remote_socket_write_data_beats_dram_3 + local_or_remote_socket_write_data_beats_dram_4 + local_or_remote_socket_write_data_beats_dram_5 + local_or_remote_socket_write_data_beats_dram_6 + local_or_remote_socket_write_data_beats_dram_7 + local_or_remote_socket_write_data_beats_dram_8 + local_or_remote_socket_write_data_beats_dram_9 + 
local_or_remote_socket_write_data_beats_dram_10 + local_or_remote_socket_write_data_beats_dram_11) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "upstream_dma_read_bandwidth_for_local_socket", + "BriefDescription": "Upstream DMA read data bandwidth for accesses in local socket.", + "MetricExpr": "(local_socket_upstream_read_data_beats_io_0 + local_socket_upstream_read_data_beats_io_1 + local_socket_upstream_read_data_beats_io_2 + local_socket_upstream_read_data_beats_io_3 + local_socket_upstream_read_data_beats_io_4 + local_socket_upstream_read_data_beats_io_5 + local_socket_upstream_read_data_beats_io_6 + local_socket_upstream_read_data_beats_io_7) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "upstream_dma_write_bandwidth_for_local_socket", + "BriefDescription": "Upstream DMA write data bandwidth for accesses in local socket.", + "MetricExpr": "(local_socket_upstream_write_data_beats_io_0 + local_socket_upstream_write_data_beats_io_1 + local_socket_upstream_write_data_beats_io_2 + local_socket_upstream_write_data_beats_io_3 + local_socket_upstream_write_data_beats_io_4 + local_socket_upstream_write_data_beats_io_5 + local_socket_upstream_write_data_beats_io_6 + local_socket_upstream_write_data_beats_io_7) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "upstream_dma_read_bandwidth_for_remote_socket", + "BriefDescription": "Upstream DMA read data bandwidth for accesses in remote socket.", + "MetricExpr": "(remote_socket_upstream_read_data_beats_io_0 + remote_socket_upstream_read_data_beats_io_1 + remote_socket_upstream_read_data_beats_io_2 + remote_socket_upstream_read_data_beats_io_3 + remote_socket_upstream_read_data_beats_io_4 + remote_socket_upstream_read_data_beats_io_5 + remote_socket_upstream_read_data_beats_io_6 + remote_socket_upstream_read_data_beats_io_7) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "upstream_dma_write_bandwidth_for_remote_socket", + "BriefDescription": "Upstream DMA write data bandwidth for accesses in remote socket.", + "MetricExpr": "(remote_socket_upstream_write_data_beats_io_0 + remote_socket_upstream_write_data_beats_io_1 + remote_socket_upstream_write_data_beats_io_2 + remote_socket_upstream_write_data_beats_io_3 + remote_socket_upstream_write_data_beats_io_4 + remote_socket_upstream_write_data_beats_io_5 + remote_socket_upstream_write_data_beats_io_6 + remote_socket_upstream_write_data_beats_io_7) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "core_inbound_data_bandwidth_for_local_socket", + "BriefDescription": "Core inbound data bandwidth for accesses in local socket.", + "MetricExpr": "(local_socket_inbound_data_beats_cfi_0 + local_socket_inbound_data_beats_cfi_1 + local_socket_inbound_data_beats_cfi_2 + local_socket_inbound_data_beats_cfi_3 + local_socket_inbound_data_beats_cfi_4 + local_socket_inbound_data_beats_cfi_5 + local_socket_inbound_data_beats_cfi_6 + local_socket_inbound_data_beats_cfi_7 + local_socket_inbound_data_beats_cfi_8 + local_socket_inbound_data_beats_cfi_9 + local_socket_inbound_data_beats_cfi_10 + local_socket_inbound_data_beats_cfi_11 + local_socket_inbound_data_beats_cfi_12 + local_socket_inbound_data_beats_cfi_13 + local_socket_inbound_data_beats_cfi_14 + 
local_socket_inbound_data_beats_cfi_15) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "3.2e-5MB/s" + }, + { + "MetricName": "core_outbound_data_bandwidth_for_local_socket", + "BriefDescription": "Core outbound data bandwidth for accesses in local socket.", + "MetricExpr": "(local_socket_outbound_data_beats_cfi_0 + local_socket_outbound_data_beats_cfi_1 + local_socket_outbound_data_beats_cfi_2 + local_socket_outbound_data_beats_cfi_3 + local_socket_outbound_data_beats_cfi_4 + local_socket_outbound_data_beats_cfi_5 + local_socket_outbound_data_beats_cfi_6 + local_socket_outbound_data_beats_cfi_7 + local_socket_outbound_data_beats_cfi_8 + local_socket_outbound_data_beats_cfi_9 + local_socket_outbound_data_beats_cfi_10 + local_socket_outbound_data_beats_cfi_11 + local_socket_outbound_data_beats_cfi_12 + local_socket_outbound_data_beats_cfi_13 + local_socket_outbound_data_beats_cfi_14 + local_socket_outbound_data_beats_cfi_15) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "core_inbound_data_bandwidth_for_remote_socket", + "BriefDescription": "Core inbound data bandwidth for accesses in remote socket.", + "MetricExpr": "(remote_socket_inbound_data_beats_cfi_0 + remote_socket_inbound_data_beats_cfi_1 + remote_socket_inbound_data_beats_cfi_2 + remote_socket_inbound_data_beats_cfi_3 + remote_socket_inbound_data_beats_cfi_4 + remote_socket_inbound_data_beats_cfi_5 + remote_socket_inbound_data_beats_cfi_6 + remote_socket_inbound_data_beats_cfi_7 + remote_socket_inbound_data_beats_cfi_8 + remote_socket_inbound_data_beats_cfi_9 + remote_socket_inbound_data_beats_cfi_10 + remote_socket_inbound_data_beats_cfi_11 + remote_socket_inbound_data_beats_cfi_12 + remote_socket_inbound_data_beats_cfi_13 + remote_socket_inbound_data_beats_cfi_14 + remote_socket_inbound_data_beats_cfi_15) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "3.2e-5MB/s" + }, + { + "MetricName": "core_outbound_data_bandwidth_for_remote_socket", + "BriefDescription": "Core outbound data bandwidth for accesses in remote socket.", + "MetricExpr": "(remote_socket_outbound_data_beats_cfi_0 + remote_socket_outbound_data_beats_cfi_1 + remote_socket_outbound_data_beats_cfi_2 + remote_socket_outbound_data_beats_cfi_3 + remote_socket_outbound_data_beats_cfi_4 + remote_socket_outbound_data_beats_cfi_5 + remote_socket_outbound_data_beats_cfi_6 + remote_socket_outbound_data_beats_cfi_7 + remote_socket_outbound_data_beats_cfi_8 + remote_socket_outbound_data_beats_cfi_9 + remote_socket_outbound_data_beats_cfi_10 + remote_socket_outbound_data_beats_cfi_11 + remote_socket_outbound_data_beats_cfi_12 + remote_socket_outbound_data_beats_cfi_13 + remote_socket_outbound_data_beats_cfi_14 + remote_socket_outbound_data_beats_cfi_15) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": "cross_socket_inbound_data_bandwidth_for_local_socket", + "BriefDescription": "Inbound data bandwidth for accesses between local socket and remote socket.", + "MetricExpr": "(local_socket_inbound_data_beats_link_0 + local_socket_inbound_data_beats_link_1 + local_socket_inbound_data_beats_link_2 + local_socket_inbound_data_beats_link_3 + local_socket_inbound_data_beats_link_4 + local_socket_inbound_data_beats_link_5) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + }, + { + "MetricName": 
"cross_socket_outbound_data_bandwidth_for_local_socket", + "BriefDescription": "Outbound data bandwidth for accesses between local socket and remote socket.", + "MetricExpr": "(local_socket_outbound_data_beats_link_0 + local_socket_outbound_data_beats_link_1 + local_socket_outbound_data_beats_link_2 + local_socket_outbound_data_beats_link_3 + local_socket_outbound_data_beats_link_4 + local_socket_outbound_data_beats_link_5) / duration_time", + "MetricGroup": "data_fabric", + "PerPkg": "1", + "ScaleUnit": "6.4e-5MB/s" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/cache.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/cache.json new file mode 100644 index 000000000000..135264595c11 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/cache.json @@ -0,0 +1,1230 @@ +[ + { + "BriefDescription": "L1D.HWPF_MISS", + "Counter": "0,1,2,3", + "EventCode": "0x51", + "EventName": "L1D.HWPF_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x20" + }, + { + "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.", + "Counter": "0,1,2,3", + "EventCode": "0x51", + "EventName": "L1D.REPLACEMENT", + "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.", + "Counter": "0,1,2,3", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.FB_FULL", + "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS", + "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.", + "Counter": "0,1,2,3", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.L2_STALLS", + "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Number of L1D misses that are outstanding", + "Counter": "0,1,2,3", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.PENDING", + "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. 
Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles with L1D load Misses outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.PENDING_CYCLES", + "PublicDescription": "Counts duration of L1D miss outstanding in cycles.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "L2 cache lines filling L2", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "L2_LINES_IN.ALL", + "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.", + "SampleAfterValue": "100003", + "UMask": "0x1f" + }, + { + "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.", + "Counter": "0,1,2,3", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.NON_SILENT", + "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.SILENT", + "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache. These lines are typically in Shared or Exclusive state. A non-threaded event.", + "SampleAfterValue": "200003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses", + "Counter": "0,1,2,3", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.USELESS_HWPF", + "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache", + "SampleAfterValue": "200003", + "UMask": "0x4" + }, + { + "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_REQUEST.ALL", + "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]", + "SampleAfterValue": "200003", + "UMask": "0xff" + }, + { + "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_REQUEST.HIT", + "PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]", + "SampleAfterValue": "200003", + "UMask": "0xdf" + }, + { + "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_REQUEST.MISS", + "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. 
[This event is alias to L2_RQSTS.MISS]", + "SampleAfterValue": "200003", + "UMask": "0x3f" + }, + { + "BriefDescription": "L2 code requests", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_CODE_RD", + "PublicDescription": "Counts the total number of L2 code requests.", + "SampleAfterValue": "200003", + "UMask": "0xe4" + }, + { + "BriefDescription": "Demand Data Read access L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD", + "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.", + "SampleAfterValue": "200003", + "UMask": "0xe1" + }, + { + "BriefDescription": "Demand requests that miss L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_DEMAND_MISS", + "PublicDescription": "Counts demand requests that miss L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0x27" + }, + { + "BriefDescription": "Demand requests to L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES", + "PublicDescription": "Counts demand requests to L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0xe7" + }, + { + "BriefDescription": "L2_RQSTS.ALL_HWPF", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_HWPF", + "SampleAfterValue": "200003", + "UMask": "0xf0" + }, + { + "BriefDescription": "RFO requests to L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_RFO", + "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.", + "SampleAfterValue": "200003", + "UMask": "0xe2" + }, + { + "BriefDescription": "L2 cache hits when fetching instructions, code reads.", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.CODE_RD_HIT", + "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.", + "SampleAfterValue": "200003", + "UMask": "0xc4" + }, + { + "BriefDescription": "L2 cache misses when fetching instructions", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.CODE_RD_MISS", + "PublicDescription": "Counts L2 cache misses when fetching instructions.", + "SampleAfterValue": "200003", + "UMask": "0x24" + }, + { + "BriefDescription": "Demand Data Read requests that hit L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", + "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0xc1" + }, + { + "BriefDescription": "Demand Data Read miss L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS", + "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.", + "SampleAfterValue": "200003", + "UMask": "0x21" + }, + { + "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.HIT", + "PublicDescription": "Counts all requests that hit L2 cache. 
[This event is alias to L2_REQUEST.HIT]", + "SampleAfterValue": "200003", + "UMask": "0xdf" + }, + { + "BriefDescription": "L2_RQSTS.HWPF_MISS", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.HWPF_MISS", + "SampleAfterValue": "200003", + "UMask": "0x30" + }, + { + "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_REQUEST.MISS]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.MISS", + "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]", + "SampleAfterValue": "200003", + "UMask": "0x3f" + }, + { + "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.REFERENCES", + "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]", + "SampleAfterValue": "200003", + "UMask": "0xff" + }, + { + "BriefDescription": "RFO requests that hit L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.RFO_HIT", + "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0xc2" + }, + { + "BriefDescription": "RFO requests that miss L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.RFO_MISS", + "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0x22" + }, + { + "BriefDescription": "SW prefetch requests that hit L2 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.SWPF_HIT", + "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.", + "SampleAfterValue": "200003", + "UMask": "0xc8" + }, + { + "BriefDescription": "SW prefetch requests that miss L2 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.SWPF_MISS", + "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.", + "SampleAfterValue": "200003", + "UMask": "0x28" + }, + { + "BriefDescription": "L2 writebacks that access L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x23", + "EventName": "L2_TRANS.L2_WB", + "PublicDescription": "Counts L2 writebacks that access L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0x40" + }, + { + "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x2e", + "EventName": "LONGEST_LAT_CACHE.MISS", + "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. 
It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.", + "SampleAfterValue": "100003", + "UMask": "0x41" + }, + { + "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x2e", + "EventName": "LONGEST_LAT_CACHE.REFERENCE", + "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.", + "SampleAfterValue": "100003", + "UMask": "0x4f" + }, + { + "BriefDescription": "Retired load instructions.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.ALL_LOADS", + "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x81" + }, + { + "BriefDescription": "Retired store instructions.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.ALL_STORES", + "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x82" + }, + { + "BriefDescription": "All retired memory instructions.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.ANY", + "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x83" + }, + { + "BriefDescription": "Retired load instructions with locked access.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.LOCK_LOADS", + "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0", + "RetirementLatencyMax": "5156", + "RetirementLatencyMean": "63.76", + "RetirementLatencyMin": "15", + "SampleAfterValue": "100007", + "UMask": "0x21" + }, + { + "BriefDescription": "Retired load instructions that split across a cacheline boundary.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.SPLIT_LOADS", + "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0", + "RetirementLatencyMax": "4704", + "RetirementLatencyMean": "3.97", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100003", + "UMask": "0x41" + }, + { + "BriefDescription": "Retired store instructions that split across a cacheline boundary.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.SPLIT_STORES", + "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. 
Available PDIST counters: 0", + "RetirementLatencyMax": "65535", + "RetirementLatencyMean": "19.0", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100003", + "UMask": "0x42" + }, + { + "BriefDescription": "Retired load instructions that hit the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS", + "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0", + "RetirementLatencyMax": "3424", + "RetirementLatencyMean": "1.57", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100003", + "UMask": "0x9" + }, + { + "BriefDescription": "Retired store instructions that hit the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_HIT_STORES", + "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0", + "RetirementLatencyMax": "65535", + "RetirementLatencyMean": "5.24", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100003", + "UMask": "0xa" + }, + { + "BriefDescription": "Retired load instructions that miss the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS", + "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x11" + }, + { + "BriefDescription": "Retired store instructions that miss the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES", + "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x12" + }, + { + "BriefDescription": "Completed demand load uops that miss the L1 d-cache.", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY", + "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)", + "SampleAfterValue": "1000003", + "UMask": "0xfd" + }, + { + "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", + "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0", + "RetirementLatencyMax": "4472", + "RetirementLatencyMean": "353.04", + "RetirementLatencyMin": "0", + "SampleAfterValue": "20011", + "UMask": "0x4" + }, + { + "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", + "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. 
Available PDIST counters: 0", + "RetirementLatencyMax": "830", + "RetirementLatencyMean": "125.27", + "RetirementLatencyMin": "0", + "SampleAfterValue": "20011", + "UMask": "0x1" + }, + { + "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE", + "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD", + "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0", + "RetirementLatencyMax": "3939", + "RetirementLatencyMean": "289.9", + "RetirementLatencyMin": "0", + "SampleAfterValue": "20011", + "UMask": "0x2" + }, + { + "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from dram homed in the local socket", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", + "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from DRAM homed in the local socket. Available PDIST counters: 0", + "RetirementLatencyMax": "4146", + "RetirementLatencyMean": "115.83", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x1" + }, + { + "BriefDescription": "Retired load instructions with remote cxl mem as the data source where the data request missed all caches.", + "Counter": "0,1,2,3", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_CXL_MEM", + "PublicDescription": "Counts retired load instructions with remote cxl mem as the data source and the data request missed L3. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x10" + }, + { + "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", + "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM Available PDIST counters: 0", + "RetirementLatencyMax": "3572", + "RetirementLatencyMean": "430.22", + "RetirementLatencyMin": "0", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", + "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache. 
Available PDIST counters: 0", + "RetirementLatencyMax": "8552", + "RetirementLatencyMean": "125.36", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x8" + }, + { + "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", + "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM Available PDIST counters: 0", + "RetirementLatencyMax": "2580", + "RetirementLatencyMean": "135.29", + "RetirementLatencyMin": "0", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd4", + "EventName": "MEM_LOAD_MISC_RETIRED.UC", + "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x4" + }, + { + "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.FB_HIT", + "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x40" + }, + { + "BriefDescription": "Retired load instructions with L1 cache hits as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L1_HIT", + "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Retired load instructions missed L1 cache as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L1_MISS", + "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0", + "SampleAfterValue": "200003", + "UMask": "0x8" + }, + { + "BriefDescription": "Retired load instructions with L2 cache hits as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L2_HIT", + "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0", + "RetirementLatencyMax": "7140", + "RetirementLatencyMean": "5.71", + "RetirementLatencyMin": "0", + "SampleAfterValue": "200003", + "UMask": "0x2" + }, + { + "BriefDescription": "Retired load instructions missed L2 cache as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L2_MISS", + "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. 
Available PDIST counters: 0", + "SampleAfterValue": "100021", + "UMask": "0x10" + }, + { + "BriefDescription": "Retired load instructions with L3 cache hits as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L3_HIT", + "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0", + "RetirementLatencyMax": "5630", + "RetirementLatencyMean": "57.64", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100021", + "UMask": "0x4" + }, + { + "BriefDescription": "Retired load instructions missed L3 cache as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L3_MISS", + "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0", + "SampleAfterValue": "50021", + "UMask": "0x20" + }, + { + "BriefDescription": "Retired load instructions with local cxl mem as the data source where the data request missed all caches.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.LOCAL_CXL_MEM", + "PublicDescription": "Counts retired load instructions with local cxl mem as the data source and the data request missed L3. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x80" + }, + { + "BriefDescription": "MEM_STORE_RETIRED.L2_HIT", + "Counter": "0,1,2,3", + "EventCode": "0x44", + "EventName": "MEM_STORE_RETIRED.L2_HIT", + "SampleAfterValue": "200003", + "UMask": "0x1" + }, + { + "BriefDescription": "Retired memory uops for any access", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe5", + "EventName": "MEM_UOP_RETIRED.ANY", + "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_CODE_RD.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C0004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10001", + "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3).", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x703C00001", + "PublicDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C0001", + "PublicDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0001", + "PublicDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x4003C0001", + "PublicDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x8003C0001", + "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.LOCAL_CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x700C00001", + "PublicDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x1030000001", + "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x830000001", + "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x703000001", + "PublicDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x1008000001", + "PublicDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x808000001", + "PublicDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F3FFC0002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3).", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x703C00002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C0002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.LOCAL_CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x700C00002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.REMOTE_CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x703000002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10808", + "PublicDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F3FFC4477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3).", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x703C04477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F003C4477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C4477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.LOCAL_CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x700C04477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.REMOTE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F33004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x1830004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x1030004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x830004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.REMOTE_CXL_MEM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x703004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x1008004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x808004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.RFO_TO_CORE.L3_HIT_M", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x1F80040022", + "PublicDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Any memory transaction that reached the SQ.", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS", + "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..", + "SampleAfterValue": "100003", + "UMask": "0x80" + }, + { + "BriefDescription": "Demand and prefetch data reads", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DATA_RD", + "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). 
Counting also covers reads due to page walks resulted from any request type.", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Cacheable and Non-Cacheable code read requests", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD", + "PublicDescription": "Counts both cacheable and Non-Cacheable code read requests.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Demand Data Read requests sent to uncore", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD", + "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DEMAND_RFO", + "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "Offcore Uncacheable memory data read transactions.", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.MEM_UC", + "PublicDescription": "This event counts noncacheable memory data read transactions.", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD", + "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", + "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. 
The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD", + "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", + "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO", + "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.", + "Counter": "0,1,2,3", + "EventCode": "0x2c", + "EventName": "SQ_MISC.BUS_LOCK", + "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. 
Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.ANY", + "SampleAfterValue": "100003", + "UMask": "0xf" + }, + { + "BriefDescription": "Number of PREFETCHNTA instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.NTA", + "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of PREFETCHW instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.PREFETCHW", + "PublicDescription": "Counts the number of PREFETCHW instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Number of PREFETCHT0 instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.T0", + "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.T1_T2", + "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x4" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/floating-point.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/floating-point.json new file mode 100644 index 000000000000..d4ca5ec61f29 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/floating-point.json @@ -0,0 +1,242 @@ +[ + { + "BriefDescription": "This event counts the cycles the floating point divider is busy.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb0", + "EventName": "ARITH.FPDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all microcode FP assists.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.FP", + "PublicDescription": "Counts all microcode Floating Point assists.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "ASSISTS.SSE_AVX_MIX", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.SSE_AVX_MIX", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.PORT_0", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.PORT_1", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.PORT_5", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V0", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V1", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V2", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", + "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. 
Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", + "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", + "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. 
DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", + "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS", + "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x18" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", + "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. 
The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x40" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", + "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x80" + }, + { + "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS", + "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x60" + }, + { + "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.SCALAR", + "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. 
FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", + "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE", + "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of any Vector retired FP arithmetic instructions", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.VECTOR", + "PublicDescription": "Number of any Vector retired FP arithmetic instructions. 
The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "1000003", + "UMask": "0xfc" + }, + { + "BriefDescription": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcf", + "EventName": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcf", + "EventName": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcf", + "EventName": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcf", + "EventName": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of all Scalar Half-Precision FP arithmetic instructions(1) retired - regular and complex.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcf", + "EventName": "FP_ARITH_INST_RETIRED2.SCALAR", + "PublicDescription": "FP_ARITH_INST_RETIRED2.SCALAR", + "SampleAfterValue": "100003", + "UMask": "0x3" + }, + { + "BriefDescription": "FP_ARITH_INST_RETIRED2.SCALAR_HALF", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcf", + "EventName": "FP_ARITH_INST_RETIRED2.SCALAR_HALF", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of all Vector (also called packed) Half-Precision FP arithmetic instructions(1) retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcf", + "EventName": "FP_ARITH_INST_RETIRED2.VECTOR", + "PublicDescription": "FP_ARITH_INST_RETIRED2.VECTOR", + "SampleAfterValue": "100003", + "UMask": "0x1c" + } +]
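A brief usage sketch (an illustration, not part of this change): the FP_ARITH_INST_RETIRED umasks above each represent a fixed number of FP operations per count (1 for SCALAR, 4 for 4_FLOPS, 8 for 8_FLOPS, 16 for 512B_PACKED_SINGLE), so a rough FLOP rate can be derived by weighting the counters accordingly. Assuming these event names are exposed verbatim by libpmc's pmu-events tables on a matching CPU, a pmcstat(8) system-wide counting run might look like:

    # hypothetical invocation; event availability depends on the running CPU
    pmcstat -w 1 \
        -s FP_ARITH_INST_RETIRED.SCALAR \
        -s FP_ARITH_INST_RETIRED.4_FLOPS \
        -s FP_ARITH_INST_RETIRED.8_FLOPS \
        -s FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE
    # FLOPs per interval ~= 1*SCALAR + 4*4_FLOPS + 8*8_FLOPS + 16*512B_PACKED_SINGLE
    # (the 128-bit packed-double event, not shown in this hunk, would add 2 per count)

Since FMA-class instructions count twice per the descriptions above, the weighted sum already accounts for their two operations per element.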
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/frontend.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/frontend.json new file mode 100644 index 000000000000..e88986601dda --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/frontend.json @@ -0,0 +1,476 @@ +[ + { + "BriefDescription": "Clears due to Unknown Branches.", + "Counter": "0,1,2,3", + "EventCode": "0x60", + "EventName": "BACLEARS.ANY", + "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Stalls caused by changing prefix length of the instruction.", + "Counter": "0,1,2,3", + "EventCode": "0x87", + "EventName": "DECODE.LCP", + "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.", + "SampleAfterValue": "500009", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles the Microcode Sequencer is busy.", + "Counter": "0,1,2,3", + "EventCode": "0x87", + "EventName": "DECODE.MS_BUSY", + "SampleAfterValue": "500009", + "UMask": "0x2" + }, + { + "BriefDescription": "DSB-to-MITE switch true penalty cycles.", + "Counter": "0,1,2,3", + "EventCode": "0x61", + "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES", + "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Retired ANT branches", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ANY_ANT", + "MSRIndex": "0x3F7", + "MSRValue": "0x9", + "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted) Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired Instructions who experienced DSB miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x1", + "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0", + "RetirementLatencyMax": "65535", + "RetirementLatencyMean": "2.46", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired Instructions who experienced a critical DSB miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.DSB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x11", + "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired Instructions who experienced iTLB true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ITLB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x14", + "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0", + "RetirementLatencyMax": "980", + "RetirementLatencyMean": "41.96", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.L1I_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x12", + "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0", + "RetirementLatencyMax": "1785", + "RetirementLatencyMean": "9.83", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.L2_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x13", + "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0", + "RetirementLatencyMax": "2854", + "RetirementLatencyMean": "137.41", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_1", + "MSRIndex": "0x3F7", + "MSRValue": "0x600106", + "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_128", + "MSRIndex": "0x3F7", + "MSRValue": "0x608006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_16", + "MSRIndex": "0x3F7", + "MSRValue": "0x601006", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2", + "MSRIndex": "0x3F7", + "MSRValue": "0x600206", + "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_256", + "MSRIndex": "0x3F7", + "MSRValue": "0x610006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1", + "MSRIndex": "0x3F7", + "MSRValue": "0x100206", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_32", + "MSRIndex": "0x3F7", + "MSRValue": "0x602006", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_4", + "MSRIndex": "0x3F7", + "MSRValue": "0x600406", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_512", + "MSRIndex": "0x3F7", + "MSRValue": "0x620006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_64", + "MSRIndex": "0x3F7", + "MSRValue": "0x604006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_8", + "MSRIndex": "0x3F7", + "MSRValue": "0x600806", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "I-Cache miss too close to Code Prefetch Instruction", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATE_SWPF", + "MSRIndex": "0x3F7", + "MSRValue": "0xA", + "PublicDescription": "Number of Instruction Cache demand miss in shadow of an on-going i-fetch cache-line triggered by PREFETCHIT0/1 instructions Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Mispredicted Retired ANT branches", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.MISP_ANT", + "MSRIndex": "0x3F7", + "MSRValue": "0x9", + "PublicDescription": "ANT retired branches that got just mispredicted Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x2" + }, + { + "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.MS_FLOWS", + "MSRIndex": "0x3F7", + "MSRValue": "0x8", + "PublicDescription": "FRONTEND_RETIRED.MS_FLOWS Available PDIST counters: 0", + "RetirementLatencyMax": "65535", + "RetirementLatencyMean": "77.14", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.STLB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x15", + "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0", + "RetirementLatencyMax": "754", + "RetirementLatencyMean": "206.85", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH", + "MSRIndex": "0x3F7", + "MSRValue": "0x17", + "PublicDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH Available PDIST counters: 0", + "RetirementLatencyMax": "532", + "RetirementLatencyMean": "3.85", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100007", + "UMask": "0x3" + }, + { + "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.", + "Counter": "0,1,2,3", + "EventCode": "0x80", + "EventName": "ICACHE_DATA.STALLS", + "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. 
The decode pipeline works at a 32 Byte granularity.", + "SampleAfterValue": "500009", + "UMask": "0x4" + }, + { + "BriefDescription": "ICACHE_DATA.STALL_PERIODS", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0x80", + "EventName": "ICACHE_DATA.STALL_PERIODS", + "SampleAfterValue": "500009", + "UMask": "0x4" + }, + { + "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.", + "Counter": "0,1,2,3", + "EventCode": "0x83", + "EventName": "ICACHE_TAG.STALLS", + "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.", + "SampleAfterValue": "200003", + "UMask": "0x4" + }, + { + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x79", + "EventName": "IDQ.DSB_CYCLES_ANY", + "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.", + "SampleAfterValue": "2000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Cycles DSB is delivering optimal number of Uops", + "Counter": "0,1,2,3", + "CounterMask": "6", + "EventCode": "0x79", + "EventName": "IDQ.DSB_CYCLES_OK", + "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.", + "SampleAfterValue": "2000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path", + "Counter": "0,1,2,3", + "EventCode": "0x79", + "EventName": "IDQ.DSB_UOPS", + "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.", + "SampleAfterValue": "2000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Cycles MITE is delivering any Uop", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x79", + "EventName": "IDQ.MITE_CYCLES_ANY", + "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Cycles MITE is delivering optimal number of Uops", + "Counter": "0,1,2,3", + "CounterMask": "6", + "EventCode": "0x79", + "EventName": "IDQ.MITE_CYCLES_OK", + "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path", + "Counter": "0,1,2,3", + "EventCode": "0x79", + "EventName": "IDQ.MITE_UOPS", + "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. 
This also means that uops are not being delivered from the Decode Stream Buffer (DSB).", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x79", + "EventName": "IDQ.MS_CYCLES_ANY", + "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.", + "SampleAfterValue": "2000003", + "UMask": "0x20" + }, + { + "BriefDescription": "Number of switches from DSB or MITE to the MS", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0x79", + "EventName": "IDQ.MS_SWITCHES", + "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy", + "Counter": "0,1,2,3", + "EventCode": "0x79", + "EventName": "IDQ.MS_UOPS", + "PublicDescription": "Counts the number of uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.", + "SampleAfterValue": "1000003", + "UMask": "0x20" + }, + { + "BriefDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x9c", + "EventName": "IDQ_BUBBLES.CORE", + "PublicDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. The count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "6", + "EventCode": "0x9c", + "EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE", + "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. 
[This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0x9c", + "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK", + "Invert": "1", + "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x9c", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE", + "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "6", + "EventCode": "0x9c", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", + "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0x9c", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK", + "Invert": "1", + "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]", + "SampleAfterValue": "1000003", + "UMask": "0x1" + } +]
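A note read off the FRONTEND_RETIRED.LATENCY_GE_* entries above: the MSRValue programmed into MSR 0x3F7 encodes the cycle threshold N directly,

    MSRValue = 0x600006 | (N << 8)    for N = 1, 2, 4, ... 512

which matches every entry in the table (0x600106 for GE_1 through 0x620006 for GE_512). The LATENCY_GE_2_BUBBLES_GE_1 entry (0x100206) reuses the same threshold field with different high bits, presumably selecting the bubble-width condition rather than the full-stall condition; that reading is an inference from the values, not something documented in this file.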
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/memory.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/memory.json new file mode 100644 index 000000000000..a81ac60e58a2 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/memory.json @@ -0,0 +1,523 @@ +[ + { + "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "2", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "6", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x6" + }, + { + "BriefDescription": "Number of machine clears due to memory ordering conflicts.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.MEMORY_ORDERING", + "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "2", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "3", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "5", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS", + "PublicDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).", + "SampleAfterValue": "1000003", + "UMask": "0x5" + }, + { + "BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "9", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS", + "PublicDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).", + "SampleAfterValue": "1000003", + "UMask": "0x9" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024", + "MSRIndex": "0x3F6", + "MSRValue": "0x400", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency. 
Available PDIST counters: 0", + "SampleAfterValue": "53", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", + "MSRIndex": "0x3F6", + "MSRValue": "0x80", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "1009", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", + "MSRIndex": "0x3F6", + "MSRValue": "0x10", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "20011", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048", + "MSRIndex": "0x3F6", + "MSRValue": "0x800", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "23", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", + "MSRIndex": "0x3F6", + "MSRValue": "0x100", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "503", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", + "MSRIndex": "0x3F6", + "MSRValue": "0x20", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", + "MSRIndex": "0x3F6", + "MSRValue": "0x4", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", + "MSRIndex": "0x3F6", + "MSRValue": "0x200", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "101", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", + "MSRIndex": "0x3F6", + "MSRValue": "0x40", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "2003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", + "MSRIndex": "0x3F6", + "MSRValue": "0x8", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "50021", + "UMask": "0x1" + }, + { + "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.", + "Counter": "0", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE", + "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_CODE_RD.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x73C000004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_CODE_RD.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3FBFC00004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x104000004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by DRAM.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x73C000001", + "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3FBFC00001", + "PublicDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x104000001", + "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x730000001", + "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to another socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x73C000002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F3FC00002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x104000002", + "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x73C004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F3FC04477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F04C04477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x70CC04477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x104004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x70C004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x730004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x733004477", + "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.WRITE_ESTIMATE.MEMORY", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0xFBFF80822", + "PublicDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM) Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts demand data read requests that miss the L3 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Cycles where data return is pending for a Demand Data Read request who miss L3 cache.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD", + "PublicDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD", + "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. 
Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Number of times an RTM execution aborted.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc9", + "EventName": "RTM_RETIRED.ABORTED", + "PublicDescription": "Counts the number of times RTM abort was triggered. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 3 categories (e.g. interrupt)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc9", + "EventName": "RTM_RETIRED.ABORTED_EVENTS", + "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 3 categories (e.g. interrupt).", + "SampleAfterValue": "100003", + "UMask": "0x80" + }, + { + "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc9", + "EventName": "RTM_RETIRED.ABORTED_MEM", + "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc9", + "EventName": "RTM_RETIRED.ABORTED_MEMTYPE", + "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.", + "SampleAfterValue": "100003", + "UMask": "0x40" + }, + { + "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc9", + "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY", + "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Number of times an RTM execution successfully committed", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc9", + "EventName": "RTM_RETIRED.COMMIT", + "PublicDescription": "Counts the number of times RTM commit succeeded.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of times an RTM execution started.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc9", + "EventName": "RTM_RETIRED.START", + "PublicDescription": "Counts the number of times we entered an RTM region. 
Does not count nested transactions.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads", + "Counter": "0,1,2,3", + "EventCode": "0x54", + "EventName": "TX_MEM.ABORT_CAPACITY_READ", + "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads", + "SampleAfterValue": "100003", + "UMask": "0x80" + }, + { + "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.", + "Counter": "0,1,2,3", + "EventCode": "0x54", + "EventName": "TX_MEM.ABORT_CAPACITY_WRITE", + "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address", + "Counter": "0,1,2,3", + "EventCode": "0x54", + "EventName": "TX_MEM.ABORT_CONFLICT", + "PublicDescription": "Counts the number of times a TSX line had a cache conflict.", + "SampleAfterValue": "100003", + "UMask": "0x1" + } +]
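A usage sketch for the load-latency events above (an assumption about tooling, not part of this change): the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* events sample loads whose latency exceeds the threshold programmed via MSR 0x3F6, and the suggested SampleAfterValue shrinks as the threshold grows since fewer loads qualify. With pmcstat(8) in process-sampling mode that might look like:

    # hypothetical; assumes hwpmc exposes these names and supports the latency facility
    pmcstat -n 2003 -P MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64 -O samples.pmc ./workload
    pmcstat -R samples.pmc -G hotspots.txt

The -n value mirrors the event's SampleAfterValue above; whether the PEBS-specific aspects (Data_LA, the 0x3F6 threshold MSR) are fully honored by hwpmc(4) is not established by this file.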
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/other.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/other.json new file mode 100644 index 000000000000..0ded8312469e --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/other.json @@ -0,0 +1,65 @@ +[ + { + "BriefDescription": "Count all other hardware assists or traps that are not necessarily architecturally exposed (through a software handler) beyond FP; SSE-AVX mix and A/D assists who are counted by dedicated sub-events. the event also counts for Machine Ordering count.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.HARDWARE", + "PublicDescription": "Count all other hardware assists or traps that are not necessarily architecturally exposed (through a software handler) beyond FP; SSE-AVX mix and A/D assists who are counted by dedicated sub-events. This includes, but not limited to, assists at EXE or MEM uop writeback like AVX* load/store/gather/scatter (non-FP GSSE-assist ) , assists generated by ROB like PEBS and RTIT, Uncore trap, RAR (Remote Action Request) and CET (Control flow Enforcement Technology) assists. the event also counts for Machine Ordering count.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "ASSISTS.PAGE_FAULT", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.PAGE_FAULT", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "HW_INTERRUPTS.MASKED", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcb", + "EventName": "HW_INTERRUPTS.MASKED", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "HW_INTERRUPTS.PENDING_AND_MASKED", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcb", + "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "Number of hardware interrupts received by the processor.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcb", + "EventName": "HW_INTERRUPTS.RECEIVED", + "PublicDescription": "Counts the number of hardware interruptions received by the processor.", + "SampleAfterValue": "203", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts streaming stores that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.STREAMING_WR.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10800", + "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles the uncore cannot take further requests", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x2d", + "EventName": "XQ.FULL_CYCLES", + "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).", + "SampleAfterValue": "1000003", + "UMask": "0x1" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/pipeline.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/pipeline.json new file mode 100644 index 000000000000..2a145a8fd94d --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/pipeline.json @@ -0,0 +1,1145 @@ +[ + { + "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb0", + "EventName": "ARITH.DIV_ACTIVE", + "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.", + "SampleAfterValue": "1000003", + "UMask": "0x9" + }, + { + "BriefDescription": "This event counts the cycles the integer divider is busy.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb0", + "EventName": "ARITH.IDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.ANY", + "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.", + "SampleAfterValue": "100003", + "UMask": "0x1b" + }, + { + "BriefDescription": "All branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.ALL_BRANCHES", + "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009" + }, + { + "BriefDescription": "Conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND", + "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x11" + }, + { + "BriefDescription": "Not taken branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND_NTAKEN", + "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x10" + }, + { + "BriefDescription": "Taken conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND_TAKEN", + "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x1" + }, + { + "BriefDescription": "Far branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.FAR_BRANCH", + "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x40" + }, + { + "BriefDescription": "Indirect near branch instructions retired (excluding returns)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT", + "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x80" + }, + { + "BriefDescription": "Direct and indirect near call instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_CALL", + "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x2" + }, + { + "BriefDescription": "Return instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x8" + }, + { + "BriefDescription": "Taken branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_TAKEN", + "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x20" + }, + { + "BriefDescription": "All mispredicted branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", + "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0", + "SampleAfterValue": "400009" + }, + { + "BriefDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST", + "PublicDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x44" + }, + { + "BriefDescription": "Mispredicted conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND", + "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x11" + }, + { + "BriefDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_COST", + "PublicDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. 
Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x51" + }, + { + "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_NTAKEN", + "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x10" + }, + { + "BriefDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST", + "PublicDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "RetirementLatencyMax": "888", + "RetirementLatencyMean": "6.11", + "RetirementLatencyMin": "0", + "SampleAfterValue": "400009", + "UMask": "0x50" + }, + { + "BriefDescription": "number of branch instructions retired that were mispredicted and taken.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_TAKEN", + "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x1" + }, + { + "BriefDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_TAKEN_COST", + "PublicDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "RetirementLatencyMax": "2750", + "RetirementLatencyMean": "5.09", + "RetirementLatencyMin": "0", + "SampleAfterValue": "400009", + "UMask": "0x41" + }, + { + "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT", + "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x80" + }, + { + "BriefDescription": "Mispredicted indirect CALL retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_CALL", + "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x2" + }, + { + "BriefDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. 
It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST", + "PublicDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "RetirementLatencyMax": "703", + "RetirementLatencyMean": "15.56", + "RetirementLatencyMin": "0", + "SampleAfterValue": "400009", + "UMask": "0x42" + }, + { + "BriefDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_COST", + "PublicDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "RetirementLatencyMax": "1562", + "RetirementLatencyMean": "11.07", + "RetirementLatencyMin": "0", + "SampleAfterValue": "100003", + "UMask": "0xc0" + }, + { + "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", + "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x20" + }, + { + "BriefDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST", + "PublicDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x60" + }, + { + "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.RET", + "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x8" + }, + { + "BriefDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.RET_COST", + "PublicDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. 
It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "RetirementLatencyMax": "1082", + "RetirementLatencyMean": "32.37", + "RetirementLatencyMin": "9", + "SampleAfterValue": "100007", + "UMask": "0x48" + }, + { + "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.C01", + "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.C02", + "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.", + "SampleAfterValue": "2000003", + "UMask": "0x20" + }, + { + "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.C0_WAIT", + "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.", + "SampleAfterValue": "2000003", + "UMask": "0x70" + }, + { + "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED", + "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", + "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.", + "SampleAfterValue": "25003", + "UMask": "0x2" + }, + { + "BriefDescription": "CPU_CLK_UNHALTED.PAUSE", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.PAUSE", + "SampleAfterValue": "2000003", + "UMask": "0x40" + }, + { + "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.PAUSE_INST", + "SampleAfterValue": "2000003", + "UMask": "0x40" + }, + { + "BriefDescription": "Core crystal clock cycles. 
Cycle counts are evenly distributed between active threads in the Core.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED", + "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.", + "SampleAfterValue": "2000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Reference cycles when the core is not in halt state.", + "Counter": "Fixed counter 2", + "EventName": "CPU_CLK_UNHALTED.REF_TSC", + "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.", + "SampleAfterValue": "2000003", + "UMask": "0x3" + }, + { + "BriefDescription": "Reference cycles when the core is not in halt state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.REF_TSC_P", + "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. 
Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Core cycles when the thread is not in halt state", + "Counter": "Fixed counter 1", + "EventName": "CPU_CLK_UNHALTED.THREAD", + "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Thread cycles when thread is not in halt state", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.THREAD_P", + "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.", + "SampleAfterValue": "2000003" + }, + { + "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "8", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles while memory subsystem has an outstanding load.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "16", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "12", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0xc" + }, + { + "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "5", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x5" + }, + { + "BriefDescription": "Total execution stalls.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "4", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL", + "SampleAfterValue": "1000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb7", + "EventName": "EXE.AMX_BUSY", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles total of 1 uop is executed on 
all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.1_PORTS_UTIL", + "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL", + "SampleAfterValue": "2000003", + "UMask": "0xc" + }, + { + "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.2_PORTS_UTIL", + "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.3_PORTS_UTIL", + "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.4_PORTS_UTIL", + "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "5", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS", + "SampleAfterValue": "2000003", + "UMask": "0x21" + }, + { + "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "2", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.BOUND_ON_STORES", + "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.", + "SampleAfterValue": "1000003", + "UMask": "0x40" + }, + { + "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS", + "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.", + "SampleAfterValue": "1000003", + "UMask": "0x80" + }, + { + "BriefDescription": "Instruction decoders utilized in a cycle", + "Counter": "0,1,2,3", + "EventCode": "0x75", + "EventName": "INST_DECODED.DECODERS", + "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of instructions retired. 
Fixed Counter - architectural event", + "Counter": "Fixed counter 0", + "EventName": "INST_RETIRED.ANY", + "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of instructions retired. General Counter - architectural event", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.ANY_P", + "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.", + "SampleAfterValue": "2000003" + }, + { + "BriefDescription": "INST_RETIRED.MACRO_FUSED", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.MACRO_FUSED", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Retired NOP instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.NOP", + "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Precise instruction retired with PEBS precise-distribution", + "Counter": "Fixed counter 0", + "EventName": "INST_RETIRED.PREC_DIST", + "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Iterations of Repeat string retired instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.REP_ITERATION", + "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. 
Note the number of iterations is implementation-dependent.", + "SampleAfterValue": "2000003", + "UMask": "0x8" + }, + { + "BriefDescription": "Clears speculative count", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xad", + "EventName": "INT_MISC.CLEARS_COUNT", + "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears", + "SampleAfterValue": "500009", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES", + "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.", + "SampleAfterValue": "500009", + "UMask": "0x80" + }, + { + "BriefDescription": "INT_MISC.MBA_STALLS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.MBA_STALLS", + "SampleAfterValue": "1000003", + "UMask": "0x20" + }, + { + "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.RECOVERY_CYCLES", + "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.", + "SampleAfterValue": "500009", + "UMask": "0x1" + }, + { + "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES", + "MSRIndex": "0x3F7", + "MSRValue": "0x7", + "SampleAfterValue": "1000003", + "UMask": "0x40" + }, + { + "BriefDescription": "TMA slots where uops got dropped", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.UOP_DROPPING", + "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "INT_VEC_RETIRED.128BIT", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.128BIT", + "SampleAfterValue": "1000003", + "UMask": "0x13" + }, + { + "BriefDescription": "INT_VEC_RETIRED.256BIT", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.256BIT", + "SampleAfterValue": "1000003", + "UMask": "0xac" + }, + { + "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.ADD_128", + "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.", + "SampleAfterValue": "1000003", + "UMask": "0x3" + }, + { + "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.ADD_256", + "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.", + "SampleAfterValue": "1000003", + "UMask": "0xc" + }, + { + "BriefDescription": "INT_VEC_RETIRED.MUL_256", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.MUL_256", + "SampleAfterValue": "1000003", + "UMask": "0x80" + }, + { + "BriefDescription": 
"INT_VEC_RETIRED.SHUFFLES", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.SHUFFLES", + "SampleAfterValue": "1000003", + "UMask": "0x40" + }, + { + "BriefDescription": "INT_VEC_RETIRED.VNNI_128", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.VNNI_128", + "SampleAfterValue": "1000003", + "UMask": "0x10" + }, + { + "BriefDescription": "INT_VEC_RETIRED.VNNI_256", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.VNNI_256", + "SampleAfterValue": "1000003", + "UMask": "0x20" + }, + { + "BriefDescription": "False dependencies in MOB due to partial compare on address.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.ADDRESS_ALIAS", + "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.NO_SR", + "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.", + "SampleAfterValue": "100003", + "UMask": "0x88" + }, + { + "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.STORE_FORWARD", + "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.", + "SampleAfterValue": "100003", + "UMask": "0x82" + }, + { + "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.", + "Counter": "0,1,2,3", + "EventCode": "0x4c", + "EventName": "LOAD_HIT_PREFETCH.SWPF", + "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. 
So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xa8", + "EventName": "LSD.CYCLES_ACTIVE", + "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "6", + "EventCode": "0xa8", + "EventName": "LSD.CYCLES_OK", + "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of Uops delivered by the LSD.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa8", + "EventName": "LSD.UOPS", + "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of machine clears (nukes) of any type.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.COUNT", + "PublicDescription": "Counts the number of machine clears (nukes) of any type.", + "SampleAfterValue": "100003", + "UMask": "0x1" + }, + { + "BriefDescription": "Self-modifying code (SMC) detected.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SMC", + "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "LFENCE instructions retired", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe0", + "EventName": "MISC2_RETIRED.LFENCE", + "PublicDescription": "number of LFENCE retired instructions", + "SampleAfterValue": "400009", + "UMask": "0x20" + }, + { + "BriefDescription": "Increments whenever there is an update to the LBR array.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcc", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa2", + "EventName": "RESOURCE_STALLS.SB", + "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. 
This counts cycles that the pipeline back-end blocked uop delivery from the front-end.", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa2", + "EventName": "RESOURCE_STALLS.SCOREBOARD", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa5", + "EventName": "RS.EMPTY", + "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)", + "SampleAfterValue": "1000003", + "UMask": "0x7" + }, + { + "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xa5", + "EventName": "RS.EMPTY_COUNT", + "Invert": "1", + "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)", + "SampleAfterValue": "100003", + "UMask": "0x7" + }, + { + "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa5", + "EventName": "RS.EMPTY_RESOURCE", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa4", + "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS", + "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "SampleAfterValue": "10000003", + "UMask": "0x2" + }, + { + "BriefDescription": "TMA slots wasted due to incorrect speculations.", + "Counter": "0", + "EventCode": "0xa4", + "EventName": "TOPDOWN.BAD_SPEC_SLOTS", + "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.", + "SampleAfterValue": "10000003", + "UMask": "0x4" + }, + { + "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions", + "Counter": "0", + "EventCode": "0xa4", + "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS", + "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. 
This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.", + "SampleAfterValue": "10000003", + "UMask": "0x8" + }, + { + "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa4", + "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS", + "SampleAfterValue": "10000003", + "UMask": "0x10" + }, + { + "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event", + "Counter": "Fixed counter 3", + "EventName": "TOPDOWN.SLOTS", + "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).", + "SampleAfterValue": "10000003", + "UMask": "0x4" + }, + { + "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa4", + "EventName": "TOPDOWN.SLOTS_P", + "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.", + "SampleAfterValue": "10000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Number of non dec-by-all uops decoded by decoder", + "Counter": "0,1,2,3", + "EventCode": "0x76", + "EventName": "UOPS_DECODED.DEC0_UOPS", + "PublicDescription": "This event counts the number of not dec-by-all uops decoded by decoder 0.", + "SampleAfterValue": "1000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Uops executed on port 0", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_0", + "PublicDescription": "Number of uops dispatch to execution port 0.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Uops executed on port 1", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_1", + "PublicDescription": "Number of uops dispatch to execution port 1.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Uops executed on ports 2, 3 and 10", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_2_3_10", + "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "Uops executed on ports 4 and 9", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_4_9", + "PublicDescription": "Number of uops dispatch to execution ports 4 and 9", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Uops executed on ports 5 and 11", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_5_11", + "PublicDescription": "Number of uops dispatch to execution ports 5 and 11", + "SampleAfterValue": "2000003", + "UMask": "0x20" + }, + { + 
"BriefDescription": "Uops executed on port 6", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_6", + "PublicDescription": "Number of uops dispatch to execution port 6.", + "SampleAfterValue": "2000003", + "UMask": "0x40" + }, + { + "BriefDescription": "Uops executed on ports 7 and 8", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_7_8", + "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.", + "SampleAfterValue": "2000003", + "UMask": "0x80" + }, + { + "BriefDescription": "Number of uops executed on the core.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE", + "PublicDescription": "Counts the number of uops executed from any thread.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1", + "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "2", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2", + "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "3", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3", + "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "4", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4", + "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles where at least 1 uop was executed per-thread", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_1", + "PublicDescription": "Cycles where at least 1 uop was executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles where at least 2 uops were executed per-thread", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "2", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_2", + "PublicDescription": "Cycles where at least 2 uops were executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles where at least 3 uops were executed per-thread", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "3", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_3", + "PublicDescription": "Cycles where at least 3 uops were executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles where at least 4 uops were executed per-thread", + "Counter": 
"0,1,2,3,4,5,6,7", + "CounterMask": "4", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_4", + "PublicDescription": "Cycles where at least 4 uops were executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.STALLS", + "Invert": "1", + "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.THREAD", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Counts the number of x87 uops dispatched.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.X87", + "PublicDescription": "Counts the number of x87 uops executed.", + "SampleAfterValue": "2000003", + "UMask": "0x10" + }, + { + "BriefDescription": "Uops that RAT issues to RS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xae", + "EventName": "UOPS_ISSUED.ANY", + "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "UOPS_ISSUED.CYCLES", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xae", + "EventName": "UOPS_ISSUED.CYCLES", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "Cycles with retired uop(s).", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.CYCLES", + "PublicDescription": "Counts cycles where at least one uop has retired.", + "SampleAfterValue": "1000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Retired uops except the last uop of each instruction.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.HEAVY", + "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "UOPS_RETIRED.MS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.MS", + "MSRIndex": "0x3F7", + "MSRValue": "0x8", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, + { + "BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.SLOTS", + "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. 
Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "Cycles without actually retired uops.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.STALLS", + "Invert": "1", + "PublicDescription": "This event counts cycles without actually retired uops.", + "SampleAfterValue": "1000003", + "UMask": "0x2" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-cache.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-cache.json new file mode 100644 index 000000000000..61151db6a5c6 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-cache.json @@ -0,0 +1,3745 @@ +[ + { + "BriefDescription": "Clockticks for CMS units attached to CHA", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_CHACMS_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "PublicDescription": "UNC_CHACMS_CLOCKTICKS", + "Unit": "CHACMS" + }, + { + "BriefDescription": "UNC_CHACMS_DISTRESS_ASSERTED", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHACMS_DISTRESS_ASSERTED", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "CHACMS" + }, + { + "BriefDescription": "Counts the number of cycles FAST trigger is received from the global FAST distress wire.", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHACMS_RING_SRC_THRTL", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "CHACMS" + }, + { + "BriefDescription": "Number of CHA clock cycles while the event is enabled", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_CHA_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "Clockticks of the uncore caching and home agent (CHA)", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.", + "Counter": "0,1,2,3", + "EventCode": "0x53", + "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.", + "Counter": "0,1,2,3", + "EventCode": "0x53", + "EventName": "UNC_CHA_DIR_LOOKUP.SNP", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.", + "Counter": "0,1,2,3", + "EventCode": "0x54", + "EventName": "UNC_CHA_DIR_UPDATE.HA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.", + "Counter": "0,1,2,3", + "EventCode": "0x54", + "EventName": "UNC_CHA_DIR_UPDATE.TOR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR or IRQ (immediate cause for triggering).", + "Counter": "0,1,2,3", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_ANY", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x3", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). 
Threshold for distress signal assertion reached in IRQ (immediate cause for triggering).", + "Counter": "0,1,2,3", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_IRQ", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR (immediate cause for triggering).", + "Counter": "0,1,2,3", + "EventCode": "0x59", + "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_TOR", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.", + "Counter": "0,1,2,3", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.", + "Counter": "0,1,2,3", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.", + "Counter": "0,1,2,3", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.", + "Counter": "0,1,2,3", + "EventCode": "0x5b", + "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : All transactions from Remote Agents", + "UMask": "0x17e0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: CRd Requests", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.CODE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1bd0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1bc1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Data Reads", + "UMask": "0x1fc1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches", + "UMask": "0x841ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops which miss the Cache", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Data Read Misses", + "UMask": "0x1fc101", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Requests to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Transactions homed locally", + "UMask": "0xbdfff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read Requests and Code Read Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x19d0ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x19c1ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read Requests to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1850ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. 
CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x1841ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1848ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: LLC Prefetch Requests to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x189dff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions", + "UMask": "0x199dff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1910ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Read Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1981ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1908ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x19c8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC", + "UMask": "0x15dfff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Code Read/Prefetch Requests from a Remote Socket", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : CRd Requests", + "UMask": "0x1a10ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Data Read/Prefetch Requests from a Remote Socket", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. 
Read transactions", + "UMask": "0x1a01ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests/Prefetches from a Remote Socket", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : RFO Requests", + "UMask": "0x1a08ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Snoop Requests from a Remote Socket", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Counts the number of times the LLC was accessed", + "UMask": "0x1c19ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: All RFO and RFO Prefetches", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.RFO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : All RFOs - Demand and Prefetches", + "UMask": "0x1bc8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches", + "UMask": "0x9c8ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Writes to Locally Homed Memory (includes writebacks from L1/L2)", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Writes", + "UMask": "0x842ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Cache Lookups: Writes to Remotely Homed Memory (includes writebacks from L1/L2)", + "Counter": "0,1,2,3", + "EventCode": "0x34", + "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cache Lookups : Remote Writes", + "UMask": "0x17c2ff", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : All Lines Victimized", + "UMask": "0xf", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.IA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.IO", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - All Lines", + "UMask": "0x200f", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in E State", + "UMask": "0x2002", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in F State", + "UMask": "0x2008", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in M State", + "UMask": "0x2001", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Local - Lines in S State", + "UMask": "0x2004", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - All Lines", + "UMask": "0x800f", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - Lines in E State", + "UMask": "0x8002", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - Lines in M State", + "UMask": "0x8001", + "Unit": "CHA" + }, + { + "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Remote - Lines in S State", + "UMask": "0x8004", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in E state", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in M state", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Lines Victimized : Lines in S State", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.", + "Counter": "0,1,2,3", + "EventCode": "0x39", + "EventName": "UNC_CHA_MISC.RFO_HIT_S", + "PerPkg": "1", + "PublicDescription": "Cbo Misc : RFO HitS", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "Counter": "0,1,2,3", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.LOCAL_INVITOE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "Counter": "0,1,2,3", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.LOCAL_READ", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "Counter": "0,1,2,3", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", + "Counter": "0,1,2,3", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.REMOTE_READ", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", + "Counter": "0,1,2,3", + "EventCode": "0x55", + "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_SHARED", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.ALLOC_SHARED", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY", + "Experimental": "1", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.HIT_SHARED", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.HIT_SHARED", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY", + "Experimental": "1", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.MISS", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.MISS", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE", + "Experimental": "1", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_SHARED", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.UPDATE_SHARED", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE", + "Experimental": "1", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_SHARED", + "Counter": "0,1,2,3", + "EventCode": "0x69", + "EventName": "UNC_CHA_REMOTE_SF.VICTIM_SHARED", + "Experimental": "1", + "PerPkg": "1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : InvalItoE", + "UMask": "0x30", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "Counter": "0,1,2,3", + "EventCode": 
"0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : Reads", + "UMask": "0x3", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS_LOCAL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.READS_REMOTE", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES", + "PerPkg": "1", + "PublicDescription": "HA Read and Write Requests : Writes", + "UMask": "0xc", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", + "Counter": "0,1,2,3", + "EventCode": "0x50", + "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "CHA" + }, + { + "BriefDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "UNC_CHA_RxC_INSERTS.IRQ", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Ingress (from CMS) Occupancy : IRQ : Counts number of entries in the specified Ingress queue in each cycle.", + "Counter": "0", + "EventCode": "0x11", + "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. 
Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.", + "Counter": "0,1,2,3", + "EventCode": "0x3d", + "EventName": "UNC_CHA_SF_EVICTION.E_STATE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Snoop Filter Capacity Evictions : E state", + "UMask": "0x2", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.", + "Counter": "0,1,2,3", + "EventCode": "0x3d", + "EventName": "UNC_CHA_SF_EVICTION.M_STATE", + "PerPkg": "1", + "PublicDescription": "Snoop Filter Capacity Evictions : M state", + "UMask": "0x1", + "Unit": "CHA" + }, + { + "BriefDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.", + "Counter": "0,1,2,3", + "EventCode": "0x3d", + "EventName": "UNC_CHA_SF_EVICTION.S_STATE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Snoop Filter Capacity Evictions : S state", + "UMask": "0x4", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR Inserts", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All", + "UMask": "0xc001ffff", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_CLFLUSH", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8c7fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCur transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCUR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCurPtl transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCURPTL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c9effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOM", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc47fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMWr transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOMWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc4ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "MemPushWr transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_MEMPUSHWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc6ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "WCiL transactions from a CXL device which hit in the L3.", + "Counter": 
"0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c86ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "WcilF transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCILF", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c867fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "WiL transactions from a CXL device which hit in the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c87ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_CLFLUSH", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8c7fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCur transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCUR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "FsRdCurPtl transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCURPTL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c9effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOM", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc47fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMWr transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOMWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc4ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "MemPushWr transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_MEMPUSHWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc6ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "WCiL transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c86ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "WcilF transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCILF", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c867fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "WiL transactions from a CXL device which miss the L3.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c87ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores", + "UMask": "0xc001ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush events 
that are initiated from the Core", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushes issued by iA Cores", + "UMask": "0xc8c7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRDs issued by iA Cores", + "UMask": "0xc80fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc88fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read from local IA", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores", + "UMask": "0xc817ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd PTEs issued by iA Cores due to a page walk", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRdPte issued by iA Cores due to a page walk", + "UMask": "0xc837ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read prefetch from local IA", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores", + "UMask": "0xc897ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores which hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC", + "UMask": "0xc001fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC", + "UMask": "0xc80ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc88ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018101", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC", + "UMask": "0xc817fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd PTEs issued by iA Cores due to page walks that hit the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE", + 
"PerPkg": "1", + "PublicDescription": "TOR Inserts : DRdPte issued by iA Cores due to a page walk that hit the LLC", + "UMask": "0xc837fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read prefetch from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc897fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM requests from local IA cores that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC", + "UMask": "0xcc47fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC", + "UMask": "0xcccffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC", + "UMask": "0xccd7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC", + "UMask": "0xccc7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC", + "UMask": "0xc807fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc887fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM events that are initiated from the Core", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores", + "UMask": "0xcc47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear requests from local IA cores", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores", + "UMask": "0xcd47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores", + "UMask": "0xcccfff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA.", + "Counter": 
"0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores", + "UMask": "0xccd7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores", + "UMask": "0xccc7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests from IA Cores which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC", + "UMask": "0xc001fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC", + "UMask": "0xc80ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRDs from local IA cores to locally homed memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc80efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Code read prefetch from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc88ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRD Prefetches from local IA cores to locally homed memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc88efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRD Prefetches from local IA cores to remotely homed memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc88f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "CRDs from local IA cores to remotely homed memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc80f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018201", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC", + "UMask": "0xc817fe01", + "Unit": "CHA" + }, + { 
+ "BriefDescription": "DRd PTEs issued by iA Cores due to a page walk that missed the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRdPte issued by iA Cores due to a page walk that missed the LLC", + "UMask": "0xc837fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC", + "PerPkg": "1", + "PublicDescription": "DRds issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.", + "UMask": "0x10c8178201", + "Unit": "CHA" + }, + { + "BriefDescription": "DRds issued by iA Cores targeting DDR Mem that Missed the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC", + "UMask": "0xc8178601", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read from local IA that miss the cache and targets local memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc816fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRds from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8168601", + "Unit": "CHA" + }, + { + "BriefDescription": "DRds from local IA cores to locally homed PMM addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8168a01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRds issued by iA Cores targeting PMM Mem that Missed the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC", + "UMask": "0xc8178a01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read prefetch from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc897fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8978201", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd Prefetches from local IA cores to DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": 
"UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC", + "UMask": "0xc8978601", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read prefetch from local IA that miss the cache and targets local memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target local memory", + "UMask": "0xc896fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd Prefetches from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8968601", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd Prefetches from local IA cores to locally homed PMM addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8968a01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd Prefetches from local IA cores to PMM addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC", + "UMask": "0xc8978a01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read prefetch from local IA that miss the cache and targets remote memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target remote memory", + "UMask": "0xc8977e01", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd Prefetches from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8970601", + "Unit": "CHA" + }, + { + "BriefDescription": "DRd Prefetches from local IA cores to remotely homed PMM addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8970a01", + "Unit": "CHA" + }, + { + "BriefDescription": "Data read from local IA that miss the cache and targets remote memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8177e01", + "Unit": "CHA" + }, + { 
+ "BriefDescription": "DRds from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8170601", + "Unit": "CHA" + }, + { + "BriefDescription": "DRds from local IA cores to remotely homed PMM addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8170a01", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM requests from local IA cores that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC", + "UMask": "0xcc47fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch code read from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC", + "UMask": "0xcccffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch data read from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC", + "UMask": "0xccd7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccd78201", + "Unit": "CHA" + }, + { + "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC", + "UMask": "0xccc7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8878201", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc8668601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to locally homed PMM addresses which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : 
WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc8668a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc86e8601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to locally homed PMM addresses which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc86e8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc8670601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc8670a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc86f0601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc86f0a01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC", + "UMask": "0xc807fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8078201", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the LLC targeting local memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally", 
+ "UMask": "0xc806fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc887fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccc78201", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the LLC targeting local memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc886fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA that miss the LLC targeting remote memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8877e01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA that miss the LLC targeting remote memory", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8077e01", + "Unit": "CHA" + }, + { + "BriefDescription": "UCRDF requests from local IA cores that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC", + "UMask": "0xc877de01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC", + "UMask": "0xc86ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA core that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC", + "UMask": "0xc867fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to DDR homed addresses which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc8678601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA cores to PMM homed addresses which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that 
missed the LLC", + "UMask": "0xc8678a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from local IA cores to DDR homed addresses which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc86f8601", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core to PMM homed addresses that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC", + "UMask": "0xc86f8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "WIL requests from local IA cores that miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC", + "UMask": "0xc87fde01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership from local IA", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by iA Cores", + "UMask": "0xc807ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "Read for ownership prefetch from local IA", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores", + "UMask": "0xc887ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "SpecItoM events that are initiated from the Core", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores", + "UMask": "0xcc57ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbEFtoEs issued by iA Cores. (Non Modified Write Backs)", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc3fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs)", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc37ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbMtoEs issued by iA Cores . (Modified Write Backs)", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc2fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbMtoI requests from local IA cores", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbMtoIs issued by iA Cores", + "UMask": "0xcc27ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WbStoIs issued by iA Cores . 
(Non Modified Write Backs)", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc67ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCIL requests from a local IA core", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores", + "UMask": "0xc86fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "WCILF requests from local IA core", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores", + "UMask": "0xc867ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from local IO devices", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices", + "UMask": "0xc001ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "CLFlush requests from IO devices", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices", + "UMask": "0xc8c3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from local IO devices which hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC", + "UMask": "0xc001fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMs from local IO devices which hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "UMask": "0xcd43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices which hit the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC", + "UMask": "0xc8f3fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs from local IO devices which hit the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC", + "UMask": "0xc803fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR ItoM inserts from local IO devices", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices", + "UMask": "0xcc43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO 
Devices", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices", + "UMask": "0xcd43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on the local socket", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that address memory on the local socket", + "UMask": "0xcd42ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on a remote socket", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that address memory on a remote socket", + "UMask": "0xcd437f04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on the local socket", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoM, indicating a write request, from IO Devices that address memory on the local socket", + "UMask": "0xcc42ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on a remote socket", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoM, indicating a write request, from IO Devices that address memory on a remote socket", + "UMask": "0xcc437f04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR inserts from local IO devices which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC", + "UMask": "0xc001fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR ItoM inserts from local IO devices which miss the cache", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices which miss the LLC", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f3fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "All TOR RFO inserts from local IO devices which miss the cache", + "Counter": 
"0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC", + "UMask": "0xc803fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCURs issued by IO devices", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices", + "UMask": "0xc8f3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that addresses memory on the local socket", + "UMask": "0xc8f2ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that addresses memory on a remote socket", + "UMask": "0xc8f37f04", + "Unit": "CHA" + }, + { + "BriefDescription": "RFOs from local IO devices", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : RFOs issued by IO Devices", + "UMask": "0xc803ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "WBMtoI requests from IO devices", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices", + "UMask": "0xcc23ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Inserts for SF or LLC Evictions", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LLC_OR_SF_EVICTIONS", + "PerPkg": "1", + "PublicDescription": "TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)", + "UMask": "0xc001ff02", + "Unit": "CHA" + }, + { + "BriefDescription": "All locally initiated requests", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local iA and IO", + "UMask": "0xc000ff05", + "Unit": "CHA" + }, + { + "BriefDescription": "All from Local iA", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local iA", + "UMask": "0xc000ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "All from Local IO", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All from Local IO", + "UMask": "0xc000ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "All remote requests (e.g. 
snoops, writebacks) that came from remote sockets", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All Remote Requests", + "UMask": "0xc001ffc8", + "Unit": "CHA" + }, + { + "BriefDescription": "All snoops to this LLC that came from remote sockets", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Inserts : All Snoops from Remote", + "UMask": "0xc001ff08", + "Unit": "CHA" + }, + { + "BriefDescription": "Occupancy for all TOR entries", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All", + "UMask": "0xc001ffff", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_CLFLUSH", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8c7fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCUR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCURPTL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c9effd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOM", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc47fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOMWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc4ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_MEMPUSHWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc6ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c86ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCILF", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c867fd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which hit in the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c87ffd20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for 
CLFlush transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_CLFLUSH", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8c7fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCUR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c8effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCURPTL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c9effe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOM", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc47fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOMWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc4ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_MEMPUSHWR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78cc6ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c86ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCILF", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c867fe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which miss the L3.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WIL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78c87ffe20", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores", + "UMask": "0xc001ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush events that are initiated from the Core", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores", + "UMask": "0xc8c7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRDs issued by iA Cores", + "UMask": "0xc80fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR 
Occupancy for Code read prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc88fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores", + "UMask": "0xc817ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd PTEs issued by iA Cores due to a page walk", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk", + "UMask": "0xc837ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores", + "UMask": "0xc897ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC", + "UMask": "0xc001fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC", + "UMask": "0xc80ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC", + "UMask": "0xc88ffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018101", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC", + "UMask": "0xc817fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd PTEs issued by iA Cores due to page walks that hit the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC", + "UMask": "0xc837fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF", + "PerPkg": "1", 
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc897fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC", + "UMask": "0xcc47fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC", + "UMask": "0xcccffd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC", + "UMask": "0xccd7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC", + "UMask": "0xccc7fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC", + "UMask": "0xc807fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC", + "UMask": "0xc887fd01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM events that are initiated from the Core", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores", + "UMask": "0xcc47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNear requests from local IA cores", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores", + "UMask": "0xcd47ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores", + "UMask": "0xcccfff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores", + "UMask": "0xccd7ff01", 
+ "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores", + "UMask": "0xccc7ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC", + "UMask": "0xc001fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC", + "UMask": "0xc80ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRDs from local IA cores to locally homed memory", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc80efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc88ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to locally homed memory", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc88efe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to remotely homed memory", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc88f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CRDs from local IA cores to remotely homed memory", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc80f7e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c0018201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC", + "UMask": "0xc817fe01", + "Unit": "CHA" + }, + 
{ + "BriefDescription": "TOR Occupancy for DRd PTEs issued by iA Cores due to a page walk that missed the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC", + "UMask": "0xc837fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8178201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC", + "UMask": "0xc8178601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc816fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRds from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8168601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRds from local IA cores to locally homed PMM addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8168a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC", + "UMask": "0xc8178a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc897fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8978201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": 
"UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC", + "UMask": "0xc8978601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc896fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8968601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to locally homed PMM addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally", + "UMask": "0xc8968a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to PMM addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC", + "UMask": "0xc8978a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter", + "UMask": "0xc8977e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8970601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to remotely homed PMM addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8970a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8177e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for 
DRds from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8170601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for DRds from local IA cores to remotely homed PMM addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely", + "UMask": "0xc8170a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC", + "UMask": "0xcc47fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC", + "UMask": "0xcccffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC", + "UMask": "0xccd7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccd78201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC", + "UMask": "0xccc7fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8878201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc8668601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed PMM addresses which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": 
"UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc8668a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally", + "UMask": "0xc86e8601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed PMM addresses which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally", + "UMask": "0xc86e8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc8670601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc8670a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely", + "UMask": "0xc86f0601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely", + "UMask": "0xc86f0a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC", + "UMask": "0xc807fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10c8078201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership 
from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc806fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC", + "UMask": "0xc887fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC", + "PerPkg": "1", + "UMask": "0x10ccc78201", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally", + "UMask": "0xc886fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8877e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely", + "UMask": "0xc8077e01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for UCRDF requests from local IA cores that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC", + "UMask": "0xc877de01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC", + "UMask": "0xc86ffe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA core that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC", + "UMask": "0xc867fe01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to DDR homed addresses which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc8678601", + "Unit": "CHA" + }, + { + 
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to PMM homed addresses which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC", + "UMask": "0xc8678a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to DDR homed addresses which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC", + "UMask": "0xc86f8601", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core to PMM homed addresses that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC", + "UMask": "0xc86f8a01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WIL requests from local IA cores that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC", + "UMask": "0xc87fde01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores", + "UMask": "0xc807ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores", + "UMask": "0xc887ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for SpecItoM events that are initiated from the Core", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores", + "UMask": "0xcc57ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WbMtoI requests from local IA cores", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores", + "UMask": "0xcc27ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores", + "UMask": "0xc86fff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WCILF requests from local IA core", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores", + "UMask": "0xc867ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices", + "Counter": "0", + "EventCode": "0x36", + 
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices", + "UMask": "0xc001ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for CLFlush requests from IO devices", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices", + "UMask": "0xc8c3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC", + "UMask": "0xc001fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMs from local IO devices which hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC", + "UMask": "0xcc43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC", + "UMask": "0xcd43fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which hit the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC", + "UMask": "0xc8f3fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs from local IO devices which hit the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC", + "UMask": "0xc803fd04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices", + "UMask": "0xcc43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices", + "UMask": "0xcd43ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC", + "UMask": "0xc001fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": 
"UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd43fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on the local socket that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on a remote socket that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC", + "UMask": "0xcd437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on the local socket that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc42fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on a remote socket that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC", + "UMask": "0xcc437e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which miss the LLC", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f3fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on the local socket that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f2fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on a remote socket that miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC", + "UMask": "0xc8f37e04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All TOR 
RFO inserts from local IO devices which miss the cache", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC", + "UMask": "0xc803fe04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices", + "UMask": "0xc8f3ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for RFOs from local IO devices", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices", + "UMask": "0xc803ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for WBMtoI requests from IO devices", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices", + "UMask": "0xcc23ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All locally initiated requests", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local iA and IO", + "UMask": "0xc000ff05", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All from Local iA", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local iA", + "UMask": "0xc000ff01", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All from Local IO", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All from Local IO", + "UMask": "0xc000ff04", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All remote requests (e.g. snoops, writebacks) that came from remote sockets", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All Remote Requests", + "UMask": "0xc001ffc8", + "Unit": "CHA" + }, + { + "BriefDescription": "TOR Occupancy for All snoops to this LLC that came from remote sockets", + "Counter": "0", + "EventCode": "0x36", + "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "TOR Occupancy : All Snoops from Remote", + "UMask": "0xc001ff08", + "Unit": "CHA" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-cxl.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-cxl.json new file mode 100644 index 000000000000..006be1843f02 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-cxl.json @@ -0,0 +1,29 @@ +[ + { + "BriefDescription": "B2CXL Clockticks", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_B2CXL_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "B2CXL" + }, + { + "BriefDescription": "Number of Allocation to Mem Data Packing buffer", + "Counter": "4,5,6,7", + "EventCode": "0x41", + "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "CXLCM" + }, + { + "BriefDescription": "Number of Allocation to M2S Data AGF", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "CXLDP" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-interconnect.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-interconnect.json new file mode 100644 index 000000000000..fc9d1aad4ee1 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-interconnect.json @@ -0,0 +1,1979 @@ +[ + { + "BriefDescription": "Clockticks of the mesh to memory (B2CMI)", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_B2CMI_CLOCKTICKS", + "PerPkg": "1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of time D2C was not honoured by egress due to directory state constraints", + "Counter": "0,1,2,3", + "EventCode": "0x17", + "EventName": "UNC_B2CMI_DIRECT2CORE_NOT_TAKEN_DIRSTATE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times B2CMI egress did D2C (direct to core)", + "Counter": "0,1,2,3", + "EventCode": "0x16", + "EventName": "UNC_B2CMI_DIRECT2CORE_TAKEN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times D2C wasn't honoured even though the incoming request had d2c set for non cisgress txn", + "Counter": "0,1,2,3", + "EventCode": "0x18", + "EventName": "UNC_B2CMI_DIRECT2CORE_TXN_OVERRIDE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of d2k wasn't done due to credit constraints", + "Counter": "0,1,2,3", + "EventCode": "0x1B", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Direct to UPI Transactions - Ignored due to lack of credits : All : Counts the number of d2k wasn't done due to credit constraints", + "Counter": "0,1,2,3", + "EventCode": "0x1B", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS.EGRESS", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of time D2K was not honoured by egress due to directory state constraints", + "Counter": "0,1,2,3", + "EventCode": "0x1A", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by egress due to directory state constraints", + "Counter": "0,1,2,3", + "EventCode": "0x1A", + "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times egress did D2K (Direct to KTI)", + "Counter": "0,1,2,3", + "EventCode": "0x19", + "EventName": "UNC_B2CMI_DIRECT2UPI_TAKEN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of times D2K wasn't honoured even though the incoming request had d2k set for non cisgress txn", + "Counter": "0,1,2,3", + "EventCode": "0x1C", + "EventName": "UNC_B2CMI_DIRECT2UPI_TXN_OVERRIDE", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit Clean", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x38", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On NonDirty Line in A State", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_A", + "Experimental": "1", + 
"PerPkg": "1", + "UMask": "0x20", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On NonDirty Line in I State", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On NonDirty Line in S State", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_S", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit Dirty (modified)", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x7", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On Dirty Line in A State", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_A", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On Dirty Line in I State", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Hit : On Dirty Line in S State", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_S", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with any directory to non persistent memory", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.ANY", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory A to non persistent memory", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_A", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory I to non persistent memory", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_I", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory S to non persistent memory", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_S", + "PerPkg": "1", + "PublicDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory S to non persistent memory", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss Clean", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x38", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On NonDirty Line in A State", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_A", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On NonDirty Line in I State", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", 
+ "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On NonDirty Line in S State", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_S", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss Dirty (modified)", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x7", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On Dirty Line in A State", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_A", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On Dirty Line in I State", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory Miss : On Dirty Line in S State", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_S", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any A2I Transition", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2I", + "PerPkg": "1", + "UMask": "0x320", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any A2S Transition", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2S", + "PerPkg": "1", + "UMask": "0x340", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts cisgress directory updates", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.ANY", + "PerPkg": "1", + "UMask": "0x301", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM)", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_ANY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in near memory to the A state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2A", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x114", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in near memory to the I state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x128", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in near memory to the S state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2S", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x142", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any I2A Transition", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2A", + "PerPkg": "1", + "UMask": "0x304", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any I2S Transition", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2S", + "PerPkg": "1", + "UMask": "0x302", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in far memory to the A state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": 
"UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2A", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x214", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in far memory to the I state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x228", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update in far memory to the S state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2S", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x242", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any S2A Transition", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2A", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x310", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Any S2I Transition", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x308", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update to the A state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2A", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x314", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update to the I state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x328", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Directory update to the S state", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2S", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x342", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts any read", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.ALL", + "PerPkg": "1", + "UMask": "0x104", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts normal reads issue to CMI", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.NORMAL", + "PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Count reads to NM region", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_CACHE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x110", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts reads to 1lm non persistent memory regions", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_MEM", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x108", + "Unit": "B2CMI" + }, + { + "BriefDescription": "All Writes - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.ALL", + "PerPkg": "1", + "UMask": "0x110", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Full Non-ISOCH - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.FULL", + "PerPkg": "1", + "UMask": "0x101", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Non-Inclusive - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.NI", + "Experimental": "1", + "PerPkg": "1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Non-Inclusive Miss - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.NI_MISS", + "Experimental": "1", + "PerPkg": "1", + "Unit": 
"B2CMI" + }, + { + "BriefDescription": "Partial Non-ISOCH - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.PARTIAL", + "PerPkg": "1", + "UMask": "0x102", + "Unit": "B2CMI" + }, + { + "BriefDescription": "DDR, acting as Cache - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_CACHE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x140", + "Unit": "B2CMI" + }, + { + "BriefDescription": "DDR - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_MEM", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x120", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0", + "Counter": "0,1,2,3", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_UPI", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0", + "Counter": "0,1,2,3", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_XPT", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.UPI_ALLCH", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Inserts : XPT -All Channels", + "Counter": "0,1,2,3", + "EventCode": "0x56", + "EventName": "UNC_B2CMI_PREFCAM_INSERTS.XPT_ALLCH", + "PerPkg": "1", + "PublicDescription": "Prefetch CAM Inserts : XPT - All Channels", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Prefetch CAM Occupancy : Channel 0", + "Counter": "0,1,2,3", + "EventCode": "0x54", + "EventName": "UNC_B2CMI_PREFCAM_OCCUPANCY.CH0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads and WRNI which were a hit", + "Counter": "0,1,2,3", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.ALL", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a hit clean", + "Counter": "0,1,2,3", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.RD_CLEAN", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a hit dirty", + "Counter": "0,1,2,3", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.RD_DIRTY", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a hit clean", + "Counter": "0,1,2,3", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.WR_CLEAN", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a hit dirty", + "Counter": "0,1,2,3", + "EventCode": "0x1F", + "EventName": "UNC_B2CMI_TAG_HIT.WR_DIRTY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a WrNI", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.CLEAN", + "PerPkg": "1", + "UMask": "0x5", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a WrNI", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.DIRTY", + "PerPkg": "1", + "UMask": "0xa", + 
"Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a Rd", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.RD_2WAY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a miss and the cache line is unmodified", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.RD_CLEAN", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm reads which were a miss and the cache line is modified", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.RD_DIRTY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm second way read miss for a WrNI", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.WR_2WAY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is unmodified", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.WR_CLEAN", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is modified", + "Counter": "0,1,2,3", + "EventCode": "0x4B", + "EventName": "UNC_B2CMI_TAG_MISS.WR_DIRTY", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Tracker Inserts : Channel 0", + "Counter": "0,1,2,3", + "EventCode": "0x32", + "EventName": "UNC_B2CMI_TRACKER_INSERTS.CH0", + "PerPkg": "1", + "UMask": "0x104", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Tracker Occupancy : Channel 0", + "Counter": "0,1,2,3", + "EventCode": "0x33", + "EventName": "UNC_B2CMI_TRACKER_OCCUPANCY.CH0", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "Write Tracker Inserts : Channel 0", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_B2CMI_WR_TRACKER_INSERTS.CH0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "B2CMI" + }, + { + "BriefDescription": "UNC_B2HOT_CLOCKTICKS", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_B2HOT_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "Clockticks for the B2HOT unit", + "UMask": "0x1", + "Unit": "B2HOT" + }, + { + "BriefDescription": "Number of uclks in domain", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_B2UPI_CLOCKTICKS", + "PerPkg": "1", + "Unit": "B2UPI" + }, + { + "BriefDescription": "Total Write Cache Occupancy : Mem", + "Counter": "0,1,2,3", + "EventCode": "0x0F", + "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IRP" + }, + { + "BriefDescription": "IRP Clockticks", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_I_CLOCKTICKS", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue", + "Counter": "0,1,2,3", + "EventCode": "0x18", + "EventName": "UNC_I_FAF_INSERTS", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": "FAF occupancy", + "Counter": "0,1,2,3", + "EventCode": "0x19", + "EventName": "UNC_I_FAF_OCCUPANCY", + "Experimental": "1", + "PerPkg": "1", + "Unit": "IRP" + }, + { + "BriefDescription": 
"Counts Timeouts - Set 0 : Fastpath Rejects", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_I_MISC0.FAST_REJ", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IRP" + }, + { + "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_I_MISC0.FAST_REQ", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IRP" + }, + { + "BriefDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed", + "Counter": "0,1,2,3", + "EventCode": "0x1F", + "EventName": "UNC_I_MISC1.LOST_FWD", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IRP" + }, + { + "BriefDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state", + "Counter": "0,1,2,3", + "EventCode": "0x1F", + "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IRP" + }, + { + "BriefDescription": "Snoop Hit E/S responses", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x74", + "Unit": "IRP" + }, + { + "BriefDescription": "Snoop Hit I responses", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x72", + "Unit": "IRP" + }, + { + "BriefDescription": "Snoop Hit M responses", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x78", + "Unit": "IRP" + }, + { + "BriefDescription": "Snoop miss responses", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_I_SNOOP_RESP.ALL_MISS", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x71", + "Unit": "IRP" + }, + { + "BriefDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "UNC_I_TRANSACTIONS.WR_PREF", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "IRP" + }, + { + "BriefDescription": "MDF Clockticks", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_MDF_CLOCKTICKS", + "PerPkg": "1", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of packets bypassing the ingress queue", + "Counter": "0,1,2,3", + "EventCode": "0x14", + "EventName": "UNC_MDF_RxR_BYPASS.AD_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of packets bypassing the ingress queue", + "Counter": "0,1,2,3", + "EventCode": "0x14", + "EventName": "UNC_MDF_RxR_BYPASS.AD_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of packets bypassing the ingress queue", + "Counter": "0,1,2,3", + "EventCode": "0x14", + "EventName": "UNC_MDF_RxR_BYPASS.AK", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of packets bypassing the ingress queue", + "Counter": "0,1,2,3", + "EventCode": "0x14", + "EventName": "UNC_MDF_RxR_BYPASS.BL_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of packets bypassing the ingress queue", + "Counter": "0,1,2,3", + "EventCode": "0x14", + "EventName": "UNC_MDF_RxR_BYPASS.BL_CRD", + "Experimental": "1", + 
"PerPkg": "1", + "UMask": "0x20", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of packets bypassing the ingress queue", + "Counter": "0,1,2,3", + "EventCode": "0x14", + "EventName": "UNC_MDF_RxR_BYPASS.IV", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (AD_BNC)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_MDF_RxR_INSERTS.AD_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (AD)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_MDF_RxR_INSERTS.AD_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (AK)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_MDF_RxR_INSERTS.AK", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (BL_BNC)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_MDF_RxR_INSERTS.BL_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (BL_CRD)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_MDF_RxR_INSERTS.BL_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (IV)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "UNC_MDF_RxR_INSERTS.IV", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "MDF" + }, + { + "BriefDescription": "Occupancy counts for the Ingress buffer", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "UNC_MDF_RxR_OCCUPANCY.AD_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "MDF" + }, + { + "BriefDescription": "Occupancy counts for the Ingress buffer", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "UNC_MDF_RxR_OCCUPANCY.AD_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "MDF" + }, + { + "BriefDescription": "Occupancy counts for the Ingress buffer", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "UNC_MDF_RxR_OCCUPANCY.AK", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "MDF" + }, + { + "BriefDescription": "Occupancy counts for the Ingress buffer", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "UNC_MDF_RxR_OCCUPANCY.BL_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "MDF" + }, + { + "BriefDescription": "Occupancy counts for the Ingress buffer", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "UNC_MDF_RxR_OCCUPANCY.BL_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "MDF" + }, + { + "BriefDescription": "Occupancy counts for the Ingress buffer", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "UNC_MDF_RxR_OCCUPANCY.IV", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress bypasses for AD_BNC", + "Counter": "0,1,2,3", + "EventCode": "0x1E", 
+ "EventName": "UNC_MDF_TxR_BYPASS.AD_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress bypasses for AD_CRD", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_MDF_TxR_BYPASS.AD_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress bypasses for AK", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_MDF_TxR_BYPASS.AK", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress bypasses for BL_BNC", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_MDF_TxR_BYPASS.BL_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress bypasses for BL_CRD", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_MDF_TxR_BYPASS.BL_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress bypasses for IV", + "Counter": "0,1,2,3", + "EventCode": "0x1E", + "EventName": "UNC_MDF_TxR_BYPASS.IV", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of egress inserts for AD_BNC", + "Counter": "0,1,2,3", + "EventCode": "0x1C", + "EventName": "UNC_MDF_TxR_INSERTS.AD_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of egress inserts for AD_CRD", + "Counter": "0,1,2,3", + "EventCode": "0x1C", + "EventName": "UNC_MDF_TxR_INSERTS.AD_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of egress inserts for AK", + "Counter": "0,1,2,3", + "EventCode": "0x1C", + "EventName": "UNC_MDF_TxR_INSERTS.AK", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of egress inserts for BL_BNC", + "Counter": "0,1,2,3", + "EventCode": "0x1C", + "EventName": "UNC_MDF_TxR_INSERTS.BL_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of egress inserts for BL_CRD", + "Counter": "0,1,2,3", + "EventCode": "0x1C", + "EventName": "UNC_MDF_TxR_INSERTS.BL_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of egress inserts for IV", + "Counter": "0,1,2,3", + "EventCode": "0x1C", + "EventName": "UNC_MDF_TxR_INSERTS.IV", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress occupancy for AD_BNC", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_MDF_TxR_OCCUPANCY.AD_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress occupancy for AD_CRD", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_MDF_TxR_OCCUPANCY.AD_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress occupancy for AK", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_MDF_TxR_OCCUPANCY.AK", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress occupancy for BL_BNC", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_MDF_TxR_OCCUPANCY.BL_BNC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "MDF" + }, 
+ { + "BriefDescription": "Egress occupancy for BL_CRD", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_MDF_TxR_OCCUPANCY.BL_CRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "MDF" + }, + { + "BriefDescription": "Egress occupancy for IV", + "Counter": "0,1,2,3", + "EventCode": "0x1D", + "EventName": "UNC_MDF_TxR_OCCUPANCY.IV", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "MDF" + }, + { + "BriefDescription": "Number of UPI LL clock cycles while the event is enabled", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_UPI_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "Number of kfclks", + "Unit": "UPI" + }, + { + "BriefDescription": "Cycles in L1 : Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "UNC_UPI_L1_POWER_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xe", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10e", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10f", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Request", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x108", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Conflict", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1aa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Invalid", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x12a", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data", + "Counter": "0,1,2,3", + 
"EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xc", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10c", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10a", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Snoop", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x9", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x109", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Writeback", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB", + "PerPkg": "1", + "UMask": "0xd", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10d", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : All Data : Shows legal flit time (hides impact of L0p and L0c).", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "Null FLITs received from any slot", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Valid Flits Received : Null FLITs received from any slot", + "UMask": "0x27", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.DATA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.IDLE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x47", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). 
This only applies to slot 2 since LLCRD is only allowed in slot 2", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.LLCRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.LLCTRL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : All Non Data : Shows legal flit time (hides impact of L0p and L0c).", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.NON_DATA", + "PerPkg": "1", + "UMask": "0x97", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.NULL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.PROTHDR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_UPI_RxL_FLITS.SLOT2", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", + "Counter": "0,1,2,3", + "EventCode": "0x30", + "EventName": "UNC_UPI_RxL_INSERTS.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. 
Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", + "Counter": "0,1,2,3", + "EventCode": "0x30", + "EventName": "UNC_UPI_RxL_INSERTS.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", + "Counter": "0,1,2,3", + "EventCode": "0x30", + "EventName": "UNC_UPI_RxL_INSERTS.SLOT2", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Occupancy - All Packets : Slot 0", + "Counter": "0,1,2,3", + "EventCode": "0x32", + "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Occupancy - All Packets : Slot 1", + "Counter": "0,1,2,3", + "EventCode": "0x32", + "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "RxQ Occupancy - All Packets : Slot 2", + "Counter": "0,1,2,3", + "EventCode": "0x32", + "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "Cycles in L0p", + "Counter": "0,1,2,3", + "EventCode": "0x27", + "EventName": "UNC_UPI_TxL0P_POWER_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "Unit": "UPI" + }, + { + "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER", + "Counter": "0,1,2,3", + "EventCode": "0x28", + "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER", + "Experimental": "1", + "PerPkg": "1", + "Unit": "UPI" + }, + { + "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT", + "Counter": "0,1,2,3", + "EventCode": "0x29", + "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT", + "Experimental": "1", + "PerPkg": "1", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xe", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10e", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x04", + 
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10f", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Request", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x108", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Conflict", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1aa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Invalid", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x12a", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xc", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10c", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xa", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10a", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x9", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x109", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xd", + "Unit": "UPI" + }, + { + "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10d", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : All Data : Counts number of data flits across this UPI link.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": 
"UNC_UPI_TxL_FLITS.ALL_DATA", + "PerPkg": "1", + "UMask": "0xf", + "Unit": "UPI" + }, + { + "BriefDescription": "All Null Flits", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL", + "PerPkg": "1", + "PublicDescription": "Valid Flits Sent : Idle", + "UMask": "0x27", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.DATA", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.IDLE", + "PerPkg": "1", + "UMask": "0x47", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.LLCRD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.LLCTRL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.NON_DATA", + "PerPkg": "1", + "PublicDescription": "Valid Flits Sent : Null FLITs transmitted to any slot", + "UMask": "0x97", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.NULL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.PROTHDR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). 
: Count Slot 1 - Other mask bits determine types of headers to count.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "UPI" + }, + { + "BriefDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_UPI_TxL_FLITS.SLOT2", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "UPI" + }, + { + "BriefDescription": "Message Received : Doorbell", + "Counter": "0,1", + "EventCode": "0x42", + "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "UBOX" + }, + { + "BriefDescription": "Message Received : Interrupt", + "Counter": "0,1", + "EventCode": "0x42", + "EventName": "UNC_U_EVENT_MSG.INT_PRIO", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Message Received : Interrupt : Interrupts", + "UMask": "0x10", + "Unit": "UBOX" + }, + { + "BriefDescription": "Message Received : IPI", + "Counter": "0,1", + "EventCode": "0x42", + "EventName": "UNC_U_EVENT_MSG.IPI_RCVD", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Message Received : IPI : Inter Processor Interrupts", + "UMask": "0x4", + "Unit": "UBOX" + }, + { + "BriefDescription": "Message Received : MSI", + "Counter": "0,1", + "EventCode": "0x42", + "EventName": "UNC_U_EVENT_MSG.MSI_RCVD", + "PerPkg": "1", + "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)", + "UMask": "0x2", + "Unit": "UBOX" + }, + { + "BriefDescription": "Message Received : VLW", + "Counter": "0,1", + "EventCode": "0x42", + "EventName": "UNC_U_EVENT_MSG.VLW_RCVD", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.", + "UMask": "0x1", + "Unit": "UBOX" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-io.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-io.json new file mode 100644 index 000000000000..be8f29625901 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-io.json @@ -0,0 +1,1925 @@ +[ + { + "BriefDescription": "IIO Clockticks", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_IIO_CLOCKTICKS", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PCIE Completion Buffer Inserts. 
Counts once per 64 byte read issued from this PCIE device.", + "Counter": "0,1,2,3", + "EventCode": "0xC2", + "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0xff", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "Count of allocations in the completion buffer", + "Counter": "2,3", + "EventCode": "0xD5", + "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0", + "Experimental": "1", + "FCMask": "0x07", + 
"PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": 
"0x004", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.", + "Counter": "2,3", + "EventCode": "0xC0", + "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Counts once for every 4 bytes read from this card to memory. 
This event does include reads to IO.", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x02", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x04", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x08", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x10", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x20", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x40", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x80", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Counts once for every 4 bytes written from this card to memory. 
This event does include writes to IO.", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x02", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x04", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x08", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x10", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x20", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x40", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x80", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x8", + "Unit": "IIO" + }, + { + 
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Counts once for every 4 bytes written from this card to a peer device's IO space.", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.ALL_PARTS", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": 
"UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x83", + "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 1G Page", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.1G_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 2M Page", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.2M_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Hits to a 4K Page", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.4K_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB lookups all", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Context cache hits", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "Context cache lookups", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB lookups first", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB Fills (same as IOTLB miss)", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.MISSES", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "IOMMU memory access (both low and high priority)", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0xc0", + "Unit": "IIO" + }, + { + "BriefDescription": "IOMMU high priority memory access", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": 
"UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_HIGH", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "IOMMU low priority memory access", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_LOW", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 1G page", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 256T page", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 2M page", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_2M_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache Hit to a 512G page", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache fill", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_FILLS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "Second Level Page Walk Cache lookup", + "Counter": "0,1,2,3", + "EventCode": "0x41", + "EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_LOOKUPS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Cycles PWT full", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.CYC_PWT_FULL", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Interrupt Entry cache hit", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "Interrupt Entry cache lookup", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "Context Cache invalidation events", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_CTXT_CACHE", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Interrupt Entry Cache invalidation events", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_INT_CACHE", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "IOTLB invalidation events", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_IOTLB", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": 
"0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "PASID Cache invalidation events", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PASID_CACHE", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "This event is deprecated. [This event is alias to UNC_IIO_NUM_OUTSTANDING_REQ_FROM_CPU.TO_IO]", + "Counter": "2,3", + "Deprecated": "1", + "EventCode": "0xc5", + "EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing [This event is alias to UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO]", + "Counter": "2,3", + "EventCode": "0xc5", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_FROM_CPU.TO_IO", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Passing data to be written", + "Counter": "0,1,2,3", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "Issuing final read or write of line", + "Counter": "0,1,2,3", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Processing response from IOMMU", + "Counter": "0,1,2,3", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Issuing to IOMMU", + "Counter": "0,1,2,3", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Request Ownership", + "Counter": "0,1,2,3", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Writing line", + "Counter": "0,1,2,3", + "EventCode": "0x88", + "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x80", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x40", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": 
"0x20", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": "-", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Posted requests sent by the integrated IO (IIO) controller to the Ubox, useful for counting message signaled interrupts (MSI).", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED", + "FCMask": "0x01", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "All 9 bits of Page Walk Tracker Occupancy", + "Counter": "0,1,2,3", + "EventCode": "0x42", + "EventName": "UNC_IIO_PWT_OCCUPANCY", + "Experimental": "1", + "PerPkg": "1", + "PortMask": "0x000", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": 
"2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x1", + "Unit": "IIO" + }, + { + 
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.", + "Counter": "2,3", + "EventCode": "0xC1", + "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.ALL_PARTS", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x0FF", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x4", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": 
"0x001", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x1", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested 
of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x001", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x002", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x004", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x008", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x010", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x020", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or 
different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x040", + "UMask": "0x2", + "Unit": "IIO" + }, + { + "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7", + "Experimental": "1", + "FCMask": "0x07", + "PerPkg": "1", + "PortMask": "0x080", + "UMask": "0x2", + "Unit": "IIO" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-memory.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-memory.json new file mode 100644 index 000000000000..252b7b4530ee --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-memory.json @@ -0,0 +1,890 @@ +[ + { + "BriefDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.ALL", + "PerPkg": "1", + "UMask": "0xf7", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.RD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf1", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Underfill Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.UFILL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf4", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Activate Count : Write transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", + "Counter": "0,1,2,3", + "EventCode": "0x02", + "EventName": "UNC_M_ACT_COUNT.WR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf2", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all CAS operations", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all reads", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD", + "PerPkg": "1", + "UMask": "0xcf", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 regular reads", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_NON_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc3", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 auto-precharge reads", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_PRE_REG", + "PerPkg": "1", + "UMask": "0xc2", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 auto-precharge underfill reads", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_PRE_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc8", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 regular reads", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG", + "PerPkg": "1", + "UMask": "0xc1", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 underfill reads", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc4", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 underfill reads", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL_ALL", + "PerPkg": "1", + "UMask": "0xcc", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0, all writes", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR", + "PerPkg": "1", + "UMask": "0xf0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 regular writes", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR_NONPRE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xd0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 0 auto-precharge writes", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_M_CAS_COUNT_SCH0.WR_PRE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xe0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all CAS operations", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all reads", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD", + "PerPkg": "1", + "UMask": "0xcf", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 regular reads", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_NON_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc3", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 
auto-precharge reads", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_PRE_REG", + "PerPkg": "1", + "UMask": "0xc2", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 auto-precharge underfill reads", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_PRE_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc8", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 regular reads", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG", + "PerPkg": "1", + "UMask": "0xc1", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 underfill reads", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL", + "PerPkg": "1", + "UMask": "0xc4", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 underfill reads", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL_ALL", + "PerPkg": "1", + "UMask": "0xcc", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1, all writes", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR", + "PerPkg": "1", + "UMask": "0xf0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 regular writes", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR_NONPRE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xd0", + "Unit": "IMC" + }, + { + "BriefDescription": "CAS count for SubChannel 1 auto-precharge writes", + "Counter": "0,1,2,3", + "EventCode": "0x06", + "EventName": "UNC_M_CAS_COUNT_SCH1.WR_PRE", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xe0", + "Unit": "IMC" + }, + { + "BriefDescription": "Number of DRAM DCLK clock cycles while the event is enabled. 
DCLK is 1/4 of DRAM data rate.", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_M_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "DRAM Clockticks", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "Number of DRAM HCLK clock cycles while the event is enabled", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_M_HCLOCKTICKS", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "DRAM Clockticks", + "Unit": "IMC" + }, + { + "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate", + "Counter": "0,1,2,3", + "EventCode": "0x72", + "EventName": "UNC_M_MNTCMD_REFRATE.REFAB1X", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate", + "Counter": "0,1,2,3", + "EventCode": "0x72", + "EventName": "UNC_M_MNTCMD_REFRATE.REFAB2X", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IMC" + }, + { + "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate", + "Counter": "0,1,2,3", + "EventCode": "0x72", + "EventName": "UNC_M_MNTCMD_REFRATE.REFSB1X", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate", + "Counter": "0,1,2,3", + "EventCode": "0x72", + "EventName": "UNC_M_MNTCMD_REFRATE.REFSB2X", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh", + "Counter": "0,1,2,3", + "EventCode": "0xA7", + "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh", + "Counter": "0,1,2,3", + "EventCode": "0xA7", + "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh", + "Counter": "0,1,2,3", + "EventCode": "0xA7", + "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh", + "Counter": "0,1,2,3", + "EventCode": "0xA7", + "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 MRRs was triggered/running", + "Counter": "0,1,2,3", + "EventCode": "0xA6", + "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 MRRs was triggered/running", + "Counter": "0,1,2,3", + "EventCode": "0xA6", + "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 MRRs was triggered/running", + "Counter": "0,1,2,3", + "EventCode": "0xA6", + "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles MR4 MRRs was triggered/running", + "Counter": "0,1,2,3", + "EventCode": "0xA6", + "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM1", + "Experimental": "1", + 
"PerPkg": "1", + "UMask": "0x8", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK2", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK3", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK2", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode", + "Counter": "0,1,2,3", + "EventCode": "0x47", + "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK3", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles a given rank is in Power Down Mode and all pages are closed", + "Counter": "0,1,2,3", + "EventCode": "0x88", + "EventName": "UNC_M_POWER_CHANNEL_PPD_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.", + "Counter": "0,1,2,3", + "EventCode": "0x89", + "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.", + "Counter": "0,1,2,3", + "EventCode": "0x89", + "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0", + "Counter": "0,1,2,3", + "EventCode": "0x46", + "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1", + "Counter": "0,1,2,3", + "EventCode": "0x46", + "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "MR4 temp reading is throttling", + "Counter": "0,1,2,3", + "EventCode": "0x46", 
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.MR4BLKEN", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "IMC" + }, + { + "BriefDescription": "RAPL is throttling", + "Counter": "0,1,2,3", + "EventCode": "0x46", + "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RAPLBLK", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.ALL", + "PerPkg": "1", + "UMask": "0xff", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Precharge due to (?) : Counts the number of DRAM Precharge commands sent on this channel.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.PGT", + "PerPkg": "1", + "UMask": "0xf8", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.RD", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf1", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.UFILL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf4", + "Unit": "IMC" + }, + { + "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "UNC_M_PRE_COUNT.WR", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xf2", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer inserts on subchannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x17", + "EventName": "UNC_M_RDB_INSERTS.SCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer inserts on subchannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x17", + "EventName": "UNC_M_RDB_INSERTS.SCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer occupancy on subchannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x1a", + "EventName": "UNC_M_RDB_OCCUPANCY_SCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read buffer occupancy on subchannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x1b", + "EventName": "UNC_M_RDB_OCCUPANCY_SCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", + "Counter": "0,1,2,3", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.PCH0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x50", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. 
Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", + "Counter": "0,1,2,3", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.PCH1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xa0", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH0", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x10", + "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x80", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x81", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x82", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x83", + "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "subevent0 - # of cycles all ranks were in SR subevent1 - # of times all ranks went into SR subevent2 -# of times ps_sr_active asserted (SRE) subevent3 - # of times ps_sr_active deasserted (SRX) subevent4 - # of times PS-&>Refresh ps_sr_req asserted (SRE) subevent5 - # of times PS-&>Refresh ps_sr_req deasserted (SRX) subevent6 - # of cycles PSCtrlr FSM was in FATAL", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_M_SELF_REFRESH.ENTER_SUCCESS", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles all ranks were in SR", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "UNC_M_SELF_REFRESH.ENTER_SUCCESS_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8e", + "EventName": 
"UNC_M_THROTTLE_CRIT_CYCLES.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at High level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8d", + "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at High level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8d", + "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8b", + "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8b", + "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8c", + "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "IMC" + }, + { + "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM", + "Counter": "0,1,2,3", + "EventCode": "0x8c", + "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue Allocations", + "Counter": "0,1,2,3", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.PCH0", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x50", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue Allocations", + "Counter": "0,1,2,3", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.PCH1", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0xa0", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH0", + "PerPkg": "1", + "UMask": "0x10", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH1", + "PerPkg": "1", + "UMask": "0x20", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH0", + "PerPkg": "1", + "UMask": "0x40", + "Unit": "IMC" + }, + { + "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x22", + "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH1", + "PerPkg": "1", + "UMask": "0x80", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x84", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x85", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH1", + "PerPkg": "1", + "Unit": "IMC" + }, + 
{ + "BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 0", + "Counter": "0,1,2,3", + "EventCode": "0x86", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH0", + "PerPkg": "1", + "Unit": "IMC" + }, + { + "BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 1", + "Counter": "0,1,2,3", + "EventCode": "0x87", + "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH1", + "PerPkg": "1", + "Unit": "IMC" + } +]
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-power.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-power.json new file mode 100644 index 000000000000..5eddf09d9b85 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/uncore-power.json @@ -0,0 +1,109 @@ +[ + { + "BriefDescription": "PCU Clockticks", + "Counter": "0,1,2,3", + "EventCode": "0x01", + "EventName": "UNC_P_CLOCKTICKS", + "PerPkg": "1", + "PublicDescription": "PCU Clockticks: The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", + "Unit": "PCU" + }, + { + "BriefDescription": "Thermal Strongest Upper Limit Cycles", + "Counter": "0,1,2,3", + "EventCode": "0x04", + "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.", + "Unit": "PCU" + }, + { + "BriefDescription": "Power Strongest Upper Limit Cycles", + "Counter": "0,1,2,3", + "EventCode": "0x05", + "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.", + "Unit": "PCU" + }, + { + "BriefDescription": "Cycles spent changing Frequency", + "Counter": "0,1,2,3", + "EventCode": "0x74", + "EventName": "UNC_P_FREQ_TRANS_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", + "Unit": "PCU" + }, + { + "BriefDescription": "Package C State Residency - C2E", + "Counter": "0,1,2,3", + "EventCode": "0x2b", + "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.", + "Unit": "PCU" + }, + { + "BriefDescription": "Package C State Residency - C6", + "Counter": "0,1,2,3", + "EventCode": "0x2d", + "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.", + "Unit": "PCU" + }, + { + "BriefDescription": "Number of cores in C0", + "Counter": "0,1,2,3", + "EventCode": "0x35", + "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0", + "PerPkg": "1", + "PublicDescription": "Number of cores in C0 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "Unit": "PCU" + }, + { + "BriefDescription": "Number of cores in C3", + "Counter": "0,1,2,3", + "EventCode": "0x36", + "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C3", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Number of cores in C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "Unit": "PCU" + }, + { + "BriefDescription": "Number of cores in C6", + "Counter": "0,1,2,3", + "EventCode": "0x37", + "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6", + "PerPkg": "1", + "PublicDescription": "Number of cores in C6 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", + "Unit": "PCU" + }, + { + "BriefDescription": "External Prochot", + "Counter": "0,1,2,3", + "EventCode": "0x0a", + "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", + "Unit": "PCU" + }, + { + "BriefDescription": "Internal Prochot", + "Counter": "0,1,2,3", + "EventCode": "0x09", + "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES", + "Experimental": "1", + "PerPkg": "1", + "PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", + "Unit": "PCU" + } +]
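[Editor's illustration] Two of the PCU event descriptions above embed derived metrics: UNC_P_CLOCKTICKS counts a fixed 1 GHz pclk and so approximates wall time, and the UNC_P_POWER_STATE_OCCUPANCY_CORES_* occupancy events can be averaged over cycles to give the mean number of cores resident in a C-state. A minimal C sketch follows; the helper itself is hypothetical and not part of the commit.

/*
 * Average C-state occupancy and sampled wall time from the PCU events
 * above.  Arguments are raw counts of UNC_P_CLOCKTICKS and of the
 * UNC_P_POWER_STATE_OCCUPANCY_CORES_C0/C6 occupancy events.
 */
#include <stdint.h>
#include <stdio.h>

static void
pcu_report(uint64_t clockticks, uint64_t occ_c0, uint64_t occ_c6)
{
	if (clockticks == 0)
		return;
	/* pclk is fixed at 1 GHz, so ticks / 1e9 approximates seconds. */
	printf("%.2f s: avg %.2f cores in C0, %.2f cores in C6\n",
	    clockticks / 1e9, (double)occ_c0 / clockticks,
	    (double)occ_c6 / clockticks);
}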
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/graniterapids/virtual-memory.json b/lib/libpmc/pmu-events/arch/x86/graniterapids/virtual-memory.json new file mode 100644 index 000000000000..b1a077268320 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/graniterapids/virtual-memory.json @@ -0,0 +1,185 @@ +[ + { + "BriefDescription": "Loads that miss the DTLB and hit the STLB.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.STLB_HIT", + "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE", + "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED", + "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0xe" + }, + { + "BriefDescription": "Page walks completed due to a demand data load to a 1G page.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G", + "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "Page walks completed due to a demand data load to a 4K page.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. 
The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Stores that miss the DTLB and hit the STLB.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.STLB_HIT", + "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE", + "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED", + "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0xe" + }, + { + "BriefDescription": "Page walks completed due to a demand data store to a 1G page.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G", + "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x8" + }, + { + "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "Page walks completed due to a demand data store to a 4K page.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. 
The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.STLB_HIT", + "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).", + "SampleAfterValue": "100003", + "UMask": "0x20" + }, + { + "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_ACTIVE", + "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.", + "SampleAfterValue": "100003", + "UMask": "0x10" + }, + { + "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_COMPLETED", + "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0xe" + }, + { + "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x4" + }, + { + "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x2" + }, + { + "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.", + "SampleAfterValue": "100003", + "UMask": "0x10" + } +]
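[Editor's illustration] The DTLB_LOAD_MISSES descriptions above explain that WALK_PENDING accumulates the number of outstanding demand-load walks each cycle, WALK_COMPLETED counts finished walks, and WALK_ACTIVE (programmed with CounterMask 1) counts cycles with at least one walk in flight. Dividing the first two gives an average walk duration in core cycles. The sketch below is illustrative; the unhalted-cycle input comes from a core event not listed in this file, and the helper is hypothetical.

/*
 * Illustrative derived metrics for demand-load page walks.  "pending",
 * "completed" and "active" are DTLB_LOAD_MISSES.WALK_PENDING,
 * .WALK_COMPLETED and .WALK_ACTIVE counts; "cycles" is an unhalted core
 * cycle count taken from elsewhere (an assumption of this sketch).
 */
#include <stdint.h>
#include <stdio.h>

static void
dtlb_walk_report(uint64_t pending, uint64_t completed, uint64_t active,
    uint64_t cycles)
{
	if (completed != 0)
		printf("average walk: %.1f cycles\n",
		    (double)pending / completed);
	if (cycles != 0)
		printf("walk outstanding: %.2f%% of cycles\n",
		    100.0 * active / cycles);
}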
\ No newline at end of file diff --git a/lib/libpmc/pmu-events/arch/x86/mapfile.csv b/lib/libpmc/pmu-events/arch/x86/mapfile.csv index 87a68de4a909..d06440960441 100644 --- a/lib/libpmc/pmu-events/arch/x86/mapfile.csv +++ b/lib/libpmc/pmu-events/arch/x86/mapfile.csv @@ -11,6 +11,7 @@ GenuineIntel-6-35,v4,bonnell,core GenuineIntel-6-5C,v8,goldmont,core GenuineIntel-6-5F,v8,goldmont,core GenuineIntel-6-7A,v1,goldmontplus,core +GenuineIntel-6-A[DE],v1,graniterapids,core GenuineIntel-6-CF,v1,emeraldrapids,core GenuineIntel-6-3C,v24,haswell,core GenuineIntel-6-45,v24,haswell,core @@ -55,4 +56,5 @@ AuthenticAMD-23-[012][0-9A-F],v2,amdzen1,core AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core AuthenticAMD-25-[0245][[:xdigit:]],v1,amdzen3,core AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen4,core +AuthenticAMD-26-[[:xdigit:]]+,v1,amdzen5,core HygonGenuine-24-00,v2,amdzen1,core diff --git a/lib/libsys/Makefile.sys b/lib/libsys/Makefile.sys index 5f149170b974..eac28fbd2fe9 100644 --- a/lib/libsys/Makefile.sys +++ b/lib/libsys/Makefile.sys @@ -73,6 +73,7 @@ INTERPOSED = \ open \ openat \ pdfork \ + pdwait \ poll \ ppoll \ pselect \ @@ -495,8 +496,10 @@ MLINKS+=ntp_adjtime.2 ntp_gettime.2 MLINKS+=open.2 openat.2 MLINKS+=pathconf.2 fpathconf.2 MLINKS+=pathconf.2 lpathconf.2 -MLINKS+=pdfork.2 pdgetpid.2\ - pdfork.2 pdkill.2 +MLINKS+=pdfork.2 pdgetpid.2 \ + pdfork.2 pdkill.2 \ + pdfork.2 pdrfork.2 \ + pdfork.2 pdwait.2 MLINKS+=pipe.2 pipe2.2 MLINKS+=poll.2 ppoll.2 MLINKS+=rctl_add_rule.2 rctl_get_limits.2 \ diff --git a/lib/libsys/Symbol.sys.map b/lib/libsys/Symbol.sys.map index e3fd8ac10621..46767f5b6a4d 100644 --- a/lib/libsys/Symbol.sys.map +++ b/lib/libsys/Symbol.sys.map @@ -182,6 +182,7 @@ FBSD_1.0 { rename; revoke; rfork; + rfork_thread; rmdir; rtprio; rtprio_thread; @@ -389,6 +390,11 @@ FBSD_1.8 { setgroups; }; +FBSD_1.9 { + pdrfork; + pdrfork_thread; +}; + FBSDprivate_1.0 { /* Add entries in sort(1) order */ __set_error_selector; diff --git a/lib/libsys/_libsys.h b/lib/libsys/_libsys.h index 12417b572a60..2af6d2bb22b3 100644 --- a/lib/libsys/_libsys.h +++ b/lib/libsys/_libsys.h @@ -472,6 +472,8 @@ typedef int (__sys_setgroups_t)(int, const gid_t *); typedef int (__sys_jail_attach_jd_t)(int); typedef int (__sys_jail_remove_jd_t)(int); typedef int (__sys_kexec_load_t)(uint64_t, u_long, struct kexec_segment *, u_long); +typedef int (__sys_pdrfork_t)(int *, int, int); +typedef int (__sys_pdwait_t)(int, int *, int, struct __wrusage *, struct __siginfo *); _Noreturn void __sys__exit(int rval); int __sys_fork(void); @@ -879,6 +881,8 @@ int __sys_setgroups(int gidsetsize, const gid_t * gidset); int __sys_jail_attach_jd(int fd); int __sys_jail_remove_jd(int fd); int __sys_kexec_load(uint64_t entry, u_long nseg, struct kexec_segment * segments, u_long flags); +int __sys_pdrfork(int * fdp, int pdflags, int rfflags); +int __sys_pdwait(int fd, int * status, int options, struct __wrusage * wrusage, struct __siginfo * info); __END_DECLS #endif /* __LIBSYS_H_ */ diff --git a/lib/libsys/aarch64/Makefile.sys b/lib/libsys/aarch64/Makefile.sys index 04b95602c580..c7aaf52d535a 100644 --- a/lib/libsys/aarch64/Makefile.sys +++ b/lib/libsys/aarch64/Makefile.sys @@ -1,6 +1,8 @@ MIASM:= ${MIASM:Nfreebsd[467]_*} SRCS+= __vdso_gettc.c \ + pdrfork_thread_gen.c \ + rfork_thread_gen.c \ sched_getcpu_gen.c MDASM= cerror.S \ diff --git a/lib/libsys/amd64/Makefile.sys b/lib/libsys/amd64/Makefile.sys index 8134bdc422a6..fc8ebe796081 100644 --- a/lib/libsys/amd64/Makefile.sys +++ b/lib/libsys/amd64/Makefile.sys @@ -4,6 +4,7 @@ SRCS+= \ 
amd64_set_fsbase.c \ amd64_set_gsbase.c \ amd64_set_tlsbase.c \ - rfork_thread.S + rfork_thread.S \ + pdrfork_thread.S MDASM= vfork.S cerror.S getcontext.S diff --git a/lib/libsys/amd64/Symbol.sys.map b/lib/libsys/amd64/Symbol.sys.map index 11e0507b6613..67a8df7d2a8c 100644 --- a/lib/libsys/amd64/Symbol.sys.map +++ b/lib/libsys/amd64/Symbol.sys.map @@ -1,5 +1,4 @@ FBSD_1.0 { - rfork_thread; amd64_get_fsbase; amd64_get_gsbase; amd64_set_fsbase; diff --git a/lib/libsys/amd64/pdrfork_thread.S b/lib/libsys/amd64/pdrfork_thread.S new file mode 100644 index 000000000000..cd187ae456ae --- /dev/null +++ b/lib/libsys/amd64/pdrfork_thread.S @@ -0,0 +1,83 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright 2000 Peter Wemm <peter@FreeBSD.org> + * Copyright 2003 Alan L. Cox <alc@cs.rice.edu> + * Copyright 2026 The FreeBSD Foundation + * All rights reserved. + * + * Portions of this software were developed by + * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from + * the FreeBSD Foundation. + */ + +#include <machine/asm.h> +/* + * With thanks to John Dyson for the original version of this. + */ + +#include <SYS.h> + +/* + * %rdi %esi %rdx %rcx %r8 %r9 + * pdrfork_thread(fdp, pdflags, rfflags, stack_addr, start_fnc, start_arg); + * + * fdp Pointer for the resulting fd location + * pdflags Flags as to pdfork. + * rfflags: Flags as to rfork. + * stack_addr: Top of stack for thread. + * start_fnc: Address of thread function to call in child. + * start_arg: Argument to pass to the thread function in child. + */ + +ENTRY(pdrfork_thread) + pushq %rbx + pushq %r12 + pushq %r13 + movq %r8, %rbx + movq %r9, %r12 + movq %rcx, %r13 + + /* + * Prepare and execute the thread creation syscall + */ + _SYSCALL(pdrfork) + jb 2f + + /* + * Check to see if we are in the parent or child + */ + cmpl $0, %edx + jnz 1f + popq %r13 + popq %r12 + popq %rbx + ret + + /* + * If we are in the child (new thread), then + * set-up the call to the internal subroutine. If it + * returns, then call __exit. 
+ */ +1: + movq %r13, %rsp + movq %r12, %rdi + call *%rbx + movl %eax, %edi + + /* + * Exit system call + */ + _SYSCALL(exit) + + /* + * Branch here if the thread creation fails: + */ +2: + popq %r13 + popq %r12 + popq %rbx + jmp HIDENAME(cerror) +END(pdrfork_thread) + + .section .note.GNU-stack,"",%progbits diff --git a/lib/libsys/arm/Makefile.sys b/lib/libsys/arm/Makefile.sys index 27d78978a2f4..e8b420932031 100644 --- a/lib/libsys/arm/Makefile.sys +++ b/lib/libsys/arm/Makefile.sys @@ -1,6 +1,13 @@ SRCS+= __vdso_gettc.c \ + pdrfork_thread_gen.c \ + rfork_thread_gen.c \ sched_getcpu_gen.c MDASM= \ cerror.S \ vfork.S + +.if ${LIB} == "sys" +.PATH: ${LIBSYS_SRCTOP}/../libc/arm/aeabi +SRCS+= aeabi_unwind_cpp.c +.endif diff --git a/lib/libsys/i386/Makefile.sys b/lib/libsys/i386/Makefile.sys index 2957dc548cf8..69507d930f25 100644 --- a/lib/libsys/i386/Makefile.sys +++ b/lib/libsys/i386/Makefile.sys @@ -1,7 +1,7 @@ SRCS+= i386_get_fsbase.c i386_get_gsbase.c i386_get_ioperm.c i386_get_ldt.c \ i386_set_fsbase.c i386_set_gsbase.c i386_set_ioperm.c i386_set_ldt.c \ i386_clr_watch.c i386_set_watch.c i386_vm86.c \ - rfork_thread.S + rfork_thread.S pdrfork_thread.S MDASM= vfork.S cerror.S getcontext.S syscall.S diff --git a/lib/libsys/i386/Symbol.sys.map b/lib/libsys/i386/Symbol.sys.map index 6b3169336a3f..03a28f9e486c 100644 --- a/lib/libsys/i386/Symbol.sys.map +++ b/lib/libsys/i386/Symbol.sys.map @@ -10,7 +10,6 @@ FBSD_1.0 { i386_set_ldt; i386_set_watch; i386_vm86; - rfork_thread; }; FBSD_1.6 { diff --git a/lib/libsys/i386/pdrfork_thread.S b/lib/libsys/i386/pdrfork_thread.S new file mode 100644 index 000000000000..92a45fc4783f --- /dev/null +++ b/lib/libsys/i386/pdrfork_thread.S @@ -0,0 +1,101 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright 2000 Peter Wemm <peter@FreeBSD.org> + * Copyright 2026 The FreeBSD Foundation + * All rights reserved. + * + * Portions of this software were developed by + * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from + * the FreeBSD Foundation. + */ + +#include <machine/asm.h> +/* + * With thanks to John Dyson for the original version of this. + */ + +#include <SYS.h> + +/* + * 8 12 16 20 24 28 + * rfork_thread(fdp, pdflags rfflags, stack_addr, start_fnc, start_arg); + * + * fdp Pointer for the resulting fd location + * pdflags Flags as to pdfork. + * rfflags: Flags as to rfork. + * stack_addr: Top of stack for thread. + * start_fnc: Address of thread function to call in child. + * start_arg: Argument to pass to the thread function in child. + */ + +ENTRY(pdrfork_thread) + pushl %ebp + movl %esp, %ebp + pushl %esi + + /* + * Push thread info onto the new thread's stack + */ + movl 20(%ebp), %esi # get stack addr + + subl $4, %esi + movl 28(%ebp), %eax # get start argument + movl %eax, (%esi) + + subl $4, %esi + movl 24(%ebp), %eax # get start thread address + movl %eax, (%esi) + + /* + * Prepare and execute the thread creation syscall + */ + pushl 16(%ebp) + pushl 12(%ebp) + pushl 8(%ebp) + pushl $0 + _SYSCALL(pdrfork) + jb 2f + + /* + * Check to see if we are in the parent or child + */ + cmpl $0, %edx + jnz 1f + addl $16, %esp + popl %esi + movl %ebp, %esp + popl %ebp + ret + .p2align 2 + + /* + * If we are in the child (new thread), then + * set-up the call to the internal subroutine. If it + * returns, then call __exit. 
+ */ +1: + movl %esi,%esp + popl %eax + call *%eax + addl $4, %esp + + /* + * Exit system call + */ + pushl %eax + pushl $0 + _SYSCALL(exit) + + /* + * Branch here if the thread creation fails: + */ +2: + addl $16, %esp + popl %esi + movl %ebp, %esp + popl %ebp + jmp HIDENAME(cerror) +END(pdrfork_thread) + + .section .note.GNU-stack,"",%progbits diff --git a/lib/libsys/interposing_table.c b/lib/libsys/interposing_table.c index 31cdb1511ab8..0151364f89d2 100644 --- a/lib/libsys/interposing_table.c +++ b/lib/libsys/interposing_table.c @@ -72,6 +72,7 @@ static interpos_func_t __libsys_interposing[INTERPOS_MAX] = { SLOT(fdatasync, __sys_fdatasync), SLOT(clock_nanosleep, __sys_clock_nanosleep), SLOT(pdfork, __sys_pdfork), + SLOT(pdwait, __sys_pdwait), }; #undef SLOT diff --git a/lib/libsys/kqueue.2 b/lib/libsys/kqueue.2 index a8ebabf02cf7..97532c530b20 100644 --- a/lib/libsys/kqueue.2 +++ b/lib/libsys/kqueue.2 @@ -22,7 +22,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd September 12, 2025 +.Dd January 24, 2026 .Dt KQUEUE 2 .Os .Sh NAME @@ -971,13 +971,13 @@ The .Fn kqueuex system call and -.Fn kevent1 +.Fn kqueue1 function first appeared in .Fx 14.0 . .Sh AUTHORS The -.Fn kqueue -system and this manual page were written by +.Em kqueue +subsystem and this manual page were written by .An Jonathan Lemon Aq Mt jlemon@FreeBSD.org . .Sh BUGS .Pp diff --git a/lib/libsys/open.2 b/lib/libsys/open.2 index a0e905a8f375..4527100252eb 100644 --- a/lib/libsys/open.2 +++ b/lib/libsys/open.2 @@ -72,11 +72,18 @@ function is equivalent to the .Fn open function except in the case where the .Fa path -specifies a relative path. +specifies a relative path or the +.Va O_EMPTY_PATH +flag is specified. For .Fn openat and relative .Fa path , +when +.Fa fd +references a directory, and the +.Va O_EMPTY_PATH +flag is not specified, the file to be opened is determined relative to the directory associated with the file descriptor .Fa fd @@ -104,6 +111,14 @@ it ignores the .Fa fd argument. .Pp +When +.Fn openat +is called with the +.Fa fd +argument that does not reference a directory, the call fails unless +.Va O_EMPTY_PATH +flag is specified, see below. +.Pp In .Xr capsicum 4 capability mode, @@ -421,9 +436,11 @@ by the descriptor at the time of the call. .Pp .Dv O_PATH -returns a file descriptor that can be used as a directory file descriptor for +returns a file descriptor that can be used as the first argument for .Fn openat -and other system calls taking a file descriptor argument, like +and other filesystem-related system calls collectively named +.Fn *at +taking a file descriptor argument, like .Xr fstatat 2 and others. The other functionality of the returned file descriptor is limited to diff --git a/lib/libsys/pdfork.2 b/lib/libsys/pdfork.2 index c5319177f90f..49226cf069de 100644 --- a/lib/libsys/pdfork.2 +++ b/lib/libsys/pdfork.2 @@ -30,24 +30,36 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. 
.\" -.Dd October 14, 2018 +.Dd January 20, 2026 .Dt PDFORK 2 .Os .Sh NAME .Nm pdfork , +.Nm pdrfork , .Nm pdgetpid , -.Nm pdkill +.Nm pdkill , +.Nm pdwait .Nd System calls to manage process descriptors .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In sys/procdesc.h .Ft pid_t -.Fn pdfork "int *fdp" "int flags" +.Fn pdfork "int *fdp" "int pdflags" +.Ft pid_t +.Fn pdrfork "int *fdp" "int pdflags" "int rfflags" .Ft int .Fn pdgetpid "int fd" "pid_t *pidp" .Ft int .Fn pdkill "int fd" "int signum" +.Ft int +.Fo pdwait +.Fa "int fd" +.Fa "int *status" +.Fa "int options" +.Fa "struct __wrusage *wrusage" +.Fa "struct __siginfo *info" +.Fc .Sh DESCRIPTION Process descriptors are special file descriptors that represent processes, and are created using @@ -63,8 +75,9 @@ will not cause .Dv SIGCHLD on termination. .Fn pdfork -can accept the flags: -.Bl -tag -width ".Dv PD_DAEMON" +can accept the +.Fa pdflags: +.Bl -tag -width PD_CLOEXEC .It Dv PD_DAEMON Instead of the default terminate-on-close behaviour, allow the process to live until it is explicitly killed with @@ -80,6 +93,35 @@ capability mode (see Set close-on-exec on process descriptor. .El .Pp +The +.Fn pdrfork +system call is a variant of +.Fn pdfork +that also takes the +.Fa rfflags +argument to control sharing of process resources between the caller +and the new process. +Like +.Fn pdfork , +the function writes the process descriptor referencing the created +process into the location pointed to by the +.Fa fdp +argument. +See +.Xr rfork 2 +for a description of the possible +.Fa rfflag +flags. +The +.Fn pdrfork +system call requires that both the +.Va RFPROC +and +.Va RFPROCDESC +flags, or +.Va RFSPAWN +flag are specified. +.Pp .Fn pdgetpid queries the process ID (PID) in the process descriptor .Fa fd . @@ -91,6 +133,16 @@ except that it accepts a process descriptor, .Fa fd , rather than a PID. .Pp +The +.Fn pdwait +system call allows the calling thread to wait and retrieve +the status information on the process referenced by the +.Fa fd +process descriptor. +See the description of the +.Xr wait6 +system call for the behavior specification. +.Pp The following system calls also have effects specific to process descriptors: .Pp .Xr fstat 2 @@ -126,15 +178,24 @@ is set; if the process is still alive and this is the last reference to the process descriptor, the process will be terminated with the signal .Dv SIGKILL . +The PID of the referenced process is not reused until the process +descriptor is closed, +whether or not the zombie process is reaped by +.Fn pdwait , +.Xr wait6 , +or similar system calls. .Sh RETURN VALUES .Fn pdfork -returns a PID, 0 or -1, as +and +.Fn pdrfork +return a PID, 0 or -1, as .Xr fork 2 does. .Pp -.Fn pdgetpid +.Fn pdgetpid , +.Fn pdkill , and -.Fn pdkill +.Fn pdwait return 0 on success and -1 on failure. .Sh ERRORS These functions may return the same error numbers as their PID-based equivalents @@ -144,6 +205,24 @@ may return the same error numbers as .Xr fork 2 ) , with the following additions: .Bl -tag -width Er +.It Bq Er EFAULT +The copyout of the resulting file descriptor value to the memory pointed +to by +.Fa fdp +failed. +.Pp +Note that the child process was already created when this condition +is detected, +and the child continues execution, same as the parent. +If this error must be handled, it is advisable to memoize the +.Fn getpid +result before the call to +.Fn pdfork +or +.Fn pdrfork , +and compare it to the value returned by +.Fn getpid +after, to see if code is executing in parent or child. 
.It Bq Er EINVAL The signal number given to .Fn pdkill @@ -172,6 +251,12 @@ and .Fn pdkill system calls first appeared in .Fx 9.0 . +The +.Fn pdrfork +and +.Fn pdwait +system calls first appeared in +.Fx 16.0 . .Pp Support for process descriptors mode was developed as part of the .Tn TrustedBSD @@ -184,3 +269,11 @@ and .An Jonathan Anderson Aq Mt jonathan@FreeBSD.org at the University of Cambridge Computer Laboratory with support from a grant from Google, Inc. +The +.Fn pdrfork +and +.Fn pdwait +functions were developed by +.An Konstantin Belousov Aq Mt kib@FreeBSD.org +with input from +.An Alan Somers Aq Mt asomers@FreeBSD.org . diff --git a/lib/libsys/pdrfork_thread_gen.c b/lib/libsys/pdrfork_thread_gen.c new file mode 100644 index 000000000000..d36b2cc11993 --- /dev/null +++ b/lib/libsys/pdrfork_thread_gen.c @@ -0,0 +1,34 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright 2026 The FreeBSD Foundation + * + * This software were developed by + * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from + * the FreeBSD Foundation. + */ + +#include <sys/types.h> +#include <sys/procdesc.h> +#include <errno.h> +#include <unistd.h> + +pid_t +pdrfork_thread(int *fdp, int pdflags, int rfflags, void *stack_addr, + int (*start_fn)(void *), void *arg) +{ + pid_t res; + int ret; + + /* See comment in rfork_thread_gen.c. */ + if (stack_addr != NULL) { + errno = EOPNOTSUPP; + return (-1); + } + res = pdrfork(fdp, pdflags, rfflags); + if (res == 0) { + ret = start_fn(arg); + _exit(ret); + } + return (res); +} diff --git a/lib/libsys/powerpc/Makefile.sys b/lib/libsys/powerpc/Makefile.sys index 9979d5179f51..f066aae1a4fa 100644 --- a/lib/libsys/powerpc/Makefile.sys +++ b/lib/libsys/powerpc/Makefile.sys @@ -1,4 +1,6 @@ SRCS+= __vdso_gettc.c \ + pdrfork_thread_gen.c \ + rfork_thread_gen.c \ sched_getcpu_gen.c MDASM+= cerror.S diff --git a/lib/libsys/powerpc64/Makefile.sys b/lib/libsys/powerpc64/Makefile.sys index 9979d5179f51..f066aae1a4fa 100644 --- a/lib/libsys/powerpc64/Makefile.sys +++ b/lib/libsys/powerpc64/Makefile.sys @@ -1,4 +1,6 @@ SRCS+= __vdso_gettc.c \ + pdrfork_thread_gen.c \ + rfork_thread_gen.c \ sched_getcpu_gen.c MDASM+= cerror.S diff --git a/lib/libsys/powerpcspe/Makefile.sys b/lib/libsys/powerpcspe/Makefile.sys deleted file mode 100644 index 35909d68cd5e..000000000000 --- a/lib/libsys/powerpcspe/Makefile.sys +++ /dev/null @@ -1,5 +0,0 @@ -CFLAGS+= -I${LIBC_SRCTOP}/powerpc -CFLAGS+= -I${LIBSYS_SRCTOP}/powerpc - -.PATH: ${LIBSYS_SRCTOP}/powerpc -.include "${LIBSYS_SRCTOP}/powerpc/Makefile.sys" diff --git a/lib/libsys/rfork.2 b/lib/libsys/rfork.2 index 649b2acae6ce..bf6db7531126 100644 --- a/lib/libsys/rfork.2 +++ b/lib/libsys/rfork.2 @@ -194,6 +194,7 @@ There is insufficient swap space for the new process. .Xr fork 2 , .Xr intro 2 , .Xr minherit 2 , +.Xr pdrfork 2 , .Xr vfork 2 , .Xr pthread_create 3 , .Xr rfork_thread 3 diff --git a/lib/libsys/rfork_thread_gen.c b/lib/libsys/rfork_thread_gen.c new file mode 100644 index 000000000000..5a8f6e3f650f --- /dev/null +++ b/lib/libsys/rfork_thread_gen.c @@ -0,0 +1,40 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright 2026 The FreeBSD Foundation + * + * This software were developed by + * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from + * the FreeBSD Foundation. + */ + +#include <errno.h> +#include <unistd.h> + +pid_t +rfork_thread(int flags, void *stack_addr, int (*start_fn)(void *), void *arg) +{ + pid_t res; + int ret; + + /* + * Generic implementation cannot switch stacks. 
Only + * architecture-specific code knows how to do it. Require + * that caller knows that, and refuse to do operate if the + * stack was supplied. + * + * Note that implementations that do switch stack, would fault + * immediately if the passed stack is NULL. They do not need to + * specifically check for the NULL stack value. + */ + if (stack_addr != NULL) { + errno = EOPNOTSUPP; + return (-1); + } + res = rfork(flags); + if (res == 0) { + ret = start_fn(arg); + _exit(ret); + } + return (res); +} diff --git a/lib/libsys/riscv/Makefile.sys b/lib/libsys/riscv/Makefile.sys index 2ff84735f484..87181565f0be 100644 --- a/lib/libsys/riscv/Makefile.sys +++ b/lib/libsys/riscv/Makefile.sys @@ -1,4 +1,6 @@ SRCS+= __vdso_gettc.c \ + pdrfork_thread_gen.c \ + rfork_thread_gen.c \ sched_getcpu_gen.c MDASM= cerror.S \ diff --git a/lib/libsys/socket.2 b/lib/libsys/socket.2 index 48b8f4e87489..3e4936b091be 100644 --- a/lib/libsys/socket.2 +++ b/lib/libsys/socket.2 @@ -25,7 +25,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd September 28, 2025 +.Dd February 1, 2026 .Dt SOCKET 2 .Os .Sh NAME @@ -52,20 +52,66 @@ These families are defined in the include file .In sys/socket.h . The currently understood formats are: .Pp -.Bd -literal -offset indent -compact -PF_LOCAL Host-internal protocols (alias for PF_UNIX), -PF_UNIX Host-internal protocols, -PF_INET Internet version 4 protocols, -PF_INET6 Internet version 6 protocols, -PF_DIVERT Firewall packet diversion/re-injection, -PF_ROUTE Internal routing protocol, -PF_KEY Internal key-management function, -PF_NETGRAPH Netgraph sockets, -PF_NETLINK Netlink protocols, -PF_BLUETOOTH Bluetooth protocols, -PF_INET_SDP OFED socket direct protocol (IPv4), -PF_HYPERV HyperV sockets -.Ed +.Bl -tag -width "PF_BLUETOOTH" +.It Dv PF_LOCAL +Host-internal protocols (alias for +.Dv PF_UNIX ) . +.It Dv PF_UNIX +Host-internal protocols. +See +.Xr unix 4 . +.It Dv PF_INET +Internet version 4 protocols. +See +.Xr icmp 4 , +.Xr igmp 4 , +.Xr ip 4 , +.Xr sctp 4 , +.Xr tcp 4 , +.Xr udp 4 , +.Xr udplite 4 . +.It Dv PF_INET6 +Internet version 6 protocols. +See +.Xr icmp6 4 , +.Xr ip6 4 , +.Xr mld 4 . +.It Dv PF_DIVERT +Firewall packet diversion/re-injection. +See +.Xr divert 4 . +.It Dv PF_ROUTE +Legacy protocol to control routing tables and receive network +configuration events from the kernel. +New applications should prefer +.Xr rtnetlink 4 +over +.Xr route 4 . +.It Dv PF_KEY +Internal key-management function. +See +.Xr ipsec 4 . +.It Dv PF_NETGRAPH +Netgraph sockets. +See +.Xr netgraph 3 +and +.Xr ng_socket 4 . +.It Dv PF_NETLINK +Netlink protocols. +See +.Xr genetlink 4 , +.Xr netlink 4 , +.Xr rtnetlink 4 . +.It Dv PF_BLUETOOTH +Bluetooth protocols. +See +.Xr ng_btsocket 4 . +.It Dv PF_INET_SDP +OFED socket direct protocol (IPv4). +.It Dv PF_HYPERV +HyperV sockets. +.El .Pp Each protocol family is connected to an address family, which has the same name except that the prefix is @@ -82,22 +128,47 @@ which specifies the semantics of communication. Currently defined types are: .Pp -.Bd -literal -offset indent -compact -SOCK_STREAM Stream socket, -SOCK_DGRAM Datagram socket, -SOCK_RAW Raw-protocol interface, -SOCK_SEQPACKET Sequenced packet stream -.Ed +.Bl -tag -width "SOCK_SEQPACKET" +.It Dv SOCK_STREAM +Stream socket. +.It Dv SOCK_DGRAM +Datagram socket. +.It Dv SOCK_RAW +Raw-protocol interface. +.It Dv SOCK_SEQPACKET +Sequenced packet stream. 
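A brief sketch of how the protocol families and socket types listed above combine in a socket(2) call (illustrative only; addresses, options, and I/O are omitted):

    #include <sys/socket.h>

    #include <err.h>
    #include <unistd.h>

    int
    main(void)
    {
            int tcp, rt;

            /* PF_INET with SOCK_STREAM selects TCP; see tcp(4). */
            tcp = socket(PF_INET, SOCK_STREAM, 0);
            if (tcp == -1)
                    err(1, "socket(PF_INET, SOCK_STREAM)");

            /* PF_ROUTE uses SOCK_RAW; new code should prefer rtnetlink(4). */
            rt = socket(PF_ROUTE, SOCK_RAW, 0);
            if (rt == -1)
                    err(1, "socket(PF_ROUTE, SOCK_RAW)");

            close(rt);
            close(tcp);
            return (0);
    }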
+.El .Pp Additionally, the following flags are allowed in the .Fa type argument: .Pp -.Bd -literal -offset indent -compact -SOCK_CLOEXEC Set close-on-exec on the new descriptor, -SOCK_CLOFORK Set close-on-fork on the new descriptor, -SOCK_NONBLOCK Set non-blocking mode on the new socket -.Ed +.Bl -tag -width "SOCK_NONBLOCK" +.It Dv SOCK_CLOEXEC +Set close-on-exec on the new socket. +See +.Xr fcntl 2 +.Dv FD_CLOEXEC +flag for +.Dv F_GETFD +command. +.It Dv SOCK_CLOFORK +Set close-on-fork on the new socket. +See +.Xr fcntl 2 +.Dv FD_CLOFORK +flag for +.Dv F_GETFD +command. +.It Dv SOCK_NONBLOCK +Set non-blocking mode on the new socket. +See +.Xr fcntl 2 +.Dv O_NONBLOCK +flag for +.Dv F_SETFL +command. +.El .Pp The .Fa protocol @@ -423,10 +494,23 @@ The socket type is not supported by the protocol. .Xr write 2 , .Xr CMSG_DATA 3 , .Xr getprotoent 3 , +.Xr netgraph 3 , .Xr divert 4 , +.Xr genetlink 4 , +.Xr icmp 4 , +.Xr icmp6 4 , +.Xr igmp 4 , .Xr ip 4 , .Xr ip6 4 , -.Xr netgraph 4 , +.Xr ipsec 4 , +.Xr netintro 4 , +.Xr netlink 4 , +.Xr ng_socket 4 , +.Xr route 4 , +.Xr rtnetlink 4 , +.Xr sctp 4 , +.Xr tcp 4 , +.Xr udp 4 , .Xr protocols 5 .Rs .%T "An Introductory 4.3 BSD Interprocess Communication Tutorial" diff --git a/lib/libsys/syscalls.map b/lib/libsys/syscalls.map index d00c862eb462..bc8574681132 100644 --- a/lib/libsys/syscalls.map +++ b/lib/libsys/syscalls.map @@ -819,4 +819,8 @@ FBSDprivate_1.0 { __sys_jail_remove_jd; _kexec_load; __sys_kexec_load; + _pdrfork; + __sys_pdrfork; + _pdwait; + __sys_pdwait; }; diff --git a/lib/libsys/wait.2 b/lib/libsys/wait.2 index eeddf77aeac7..ca289c69f188 100644 --- a/lib/libsys/wait.2 +++ b/lib/libsys/wait.2 @@ -656,6 +656,7 @@ do not specify a valid set of processes. .El .Sh SEE ALSO .Xr _exit 2 , +.Xr pdwait 2 , .Xr procctl 2 , .Xr ptrace 2 , .Xr sigaction 2 , diff --git a/lib/libsysdecode/Makefile.depend b/lib/libsysdecode/Makefile.depend index a9ccf3aa0870..0f4942adecfe 100644 --- a/lib/libsysdecode/Makefile.depend +++ b/lib/libsysdecode/Makefile.depend @@ -3,7 +3,6 @@ DIRDEPS = \ cddl/lib/libdtrace \ cddl/lib/libnvpair \ - cddl/lib/libtpool \ cddl/lib/libzfs \ cddl/lib/libzfs_core \ cddl/lib/libzfsbootenv \ diff --git a/lib/libthr/pthread.map b/lib/libthr/pthread.map index 3a5353a32dc3..ad0c28de98c1 100644 --- a/lib/libthr/pthread.map +++ b/lib/libthr/pthread.map @@ -342,3 +342,7 @@ FBSD_1.8 { pthread_signals_unblock_np; pthread_sigqueue; }; + +FBSD_1.9 { + pthread_tryjoin_np; +}; diff --git a/lib/libthr/tests/Makefile b/lib/libthr/tests/Makefile index 017b740157dc..5ffcd7b046c8 100644 --- a/lib/libthr/tests/Makefile +++ b/lib/libthr/tests/Makefile @@ -36,6 +36,7 @@ NETBSD_ATF_TESTS_SH+= resolv_test ATF_TESTS_C+= atfork_test ATF_TESTS_C+= umtx_op_test ATF_TESTS_C+= pthread_sigqueue_test +ATF_TESTS_C+= pthread_tryjoin_test LIBADD+= pthread LIBADD.fpu_test+= m diff --git a/lib/libthr/tests/pthread_tryjoin_test.c b/lib/libthr/tests/pthread_tryjoin_test.c new file mode 100644 index 000000000000..ba88ac2ed6f5 --- /dev/null +++ b/lib/libthr/tests/pthread_tryjoin_test.c @@ -0,0 +1,62 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright 2025 The FreeBSD Foundation + * + * This software was developed by Konstantin Belousov <kib@FreeBSD.org> + * under sponsorship from the FreeBSD Foundation. 
+ */ + +#include <atf-c.h> +#include <err.h> +#include <errno.h> +#include <pthread.h> +#include <pthread_np.h> +#include <stdatomic.h> +#include <stdio.h> +#include <unistd.h> + +static atomic_int finish; + +static void * +thr_fun(void *arg) +{ + while (atomic_load_explicit(&finish, memory_order_relaxed) != 1) + sleep(1); + atomic_store_explicit(&finish, 2, memory_order_relaxed); + return (arg); +} + +ATF_TC(pthread_tryjoin); +ATF_TC_HEAD(pthread_tryjoin, tc) +{ + atf_tc_set_md_var(tc, "descr", + "Checks pthread_tryjoin(3)"); +} + +ATF_TC_BODY(pthread_tryjoin, tc) +{ + pthread_t thr; + void *retval; + int error, x; + + error = pthread_create(&thr, NULL, thr_fun, &x); + ATF_REQUIRE_EQ(error, 0); + + error = pthread_tryjoin_np(thr, &retval); + ATF_REQUIRE_EQ(error, EBUSY); + + atomic_store_explicit(&finish, 1, memory_order_relaxed); + while (atomic_load_explicit(&finish, memory_order_relaxed) != 2) + sleep(1); + + error = pthread_tryjoin_np(thr, &retval); + ATF_REQUIRE_EQ(error, 0); + ATF_REQUIRE_EQ(retval, &x); +} + +ATF_TP_ADD_TCS(tp) +{ + ATF_TP_ADD_TC(tp, pthread_tryjoin); + return (atf_no_error()); +} diff --git a/lib/libthr/thread/thr_join.c b/lib/libthr/thread/thr_join.c index 53f28daa258d..33eb18638169 100644 --- a/lib/libthr/thread/thr_join.c +++ b/lib/libthr/thread/thr_join.c @@ -34,29 +34,39 @@ #include "thr_private.h" int _pthread_peekjoin_np(pthread_t pthread, void **thread_return); +int _pthread_tryjoin_np(pthread_t pthread, void **thread_return); int _pthread_timedjoin_np(pthread_t pthread, void **thread_return, const struct timespec *abstime); -static int join_common(pthread_t, void **, const struct timespec *, bool peek); +static int join_common(pthread_t, void **, const struct timespec *, bool peek, + bool try); __weak_reference(_thr_join, pthread_join); __weak_reference(_thr_join, _pthread_join); __weak_reference(_pthread_timedjoin_np, pthread_timedjoin_np); __weak_reference(_pthread_peekjoin_np, pthread_peekjoin_np); +__weak_reference(_pthread_tryjoin_np, pthread_tryjoin_np); -static void backout_join(void *arg) +static void +backout_join(struct pthread *pthread, struct pthread *curthread) { - struct pthread *pthread = (struct pthread *)arg; - struct pthread *curthread = _get_curthread(); - THR_THREAD_LOCK(curthread, pthread); pthread->joiner = NULL; THR_THREAD_UNLOCK(curthread, pthread); } +static void +backout_join_pop(void *arg) +{ + struct pthread *pthread = (struct pthread *)arg; + struct pthread *curthread = _get_curthread(); + + backout_join(pthread, curthread); +} + int _thr_join(pthread_t pthread, void **thread_return) { - return (join_common(pthread, thread_return, NULL, false)); + return (join_common(pthread, thread_return, NULL, false, false)); } int @@ -67,13 +77,34 @@ _pthread_timedjoin_np(pthread_t pthread, void **thread_return, abstime->tv_nsec >= 1000000000) return (EINVAL); - return (join_common(pthread, thread_return, abstime, false)); + return (join_common(pthread, thread_return, abstime, false, false)); } int _pthread_peekjoin_np(pthread_t pthread, void **thread_return) { - return (join_common(pthread, thread_return, NULL, true)); + return (join_common(pthread, thread_return, NULL, true, false)); +} + +int +_pthread_tryjoin_np(pthread_t pthread, void **thread_return) +{ + return (join_common(pthread, thread_return, NULL, false, true)); +} + +static void +join_common_joined(struct pthread *pthread, struct pthread *curthread, + void **thread_return) +{ + void *tmp; + + tmp = pthread->ret; + pthread->flags |= THR_FLAGS_DETACHED; + pthread->joiner = 
NULL; + _thr_try_gc(curthread, pthread); /* thread lock released */ + + if (thread_return != NULL) + *thread_return = tmp; } /* @@ -82,11 +113,10 @@ _pthread_peekjoin_np(pthread_t pthread, void **thread_return) */ static int join_common(pthread_t pthread, void **thread_return, - const struct timespec *abstime, bool peek) + const struct timespec *abstime, bool peek, bool try) { struct pthread *curthread = _get_curthread(); struct timespec ts, ts2, *tsp; - void *tmp; long tid; int ret; @@ -120,12 +150,22 @@ join_common(pthread_t pthread, void **thread_return, return (ret); } + /* Only try to join. */ + if (try) { + if (pthread->tid != TID_TERMINATED) { + THR_THREAD_UNLOCK(curthread, pthread); + return (EBUSY); + } + join_common_joined(pthread, curthread, thread_return); + return (0); + } + /* Set the running thread to be the joiner: */ pthread->joiner = curthread; THR_THREAD_UNLOCK(curthread, pthread); - THR_CLEANUP_PUSH(curthread, backout_join, pthread); + THR_CLEANUP_PUSH(curthread, backout_join_pop, pthread); _thr_cancel_enter(curthread); tid = pthread->tid; @@ -149,20 +189,12 @@ join_common(pthread_t pthread, void **thread_return, _thr_cancel_leave(curthread, 0); THR_CLEANUP_POP(curthread, 0); - if (ret == ETIMEDOUT) { - THR_THREAD_LOCK(curthread, pthread); - pthread->joiner = NULL; - THR_THREAD_UNLOCK(curthread, pthread); + if (ret == ETIMEDOUT || ret == EBUSY) { + backout_join(pthread, curthread); } else { ret = 0; - tmp = pthread->ret; THR_THREAD_LOCK(curthread, pthread); - pthread->flags |= THR_FLAGS_DETACHED; - pthread->joiner = NULL; - _thr_try_gc(curthread, pthread); /* thread lock released */ - - if (thread_return != NULL) - *thread_return = tmp; + join_common_joined(pthread, curthread, thread_return); } return (ret); } diff --git a/lib/libthr/thread/thr_syscalls.c b/lib/libthr/thread/thr_syscalls.c index 188374a30070..bff2d0624aee 100644 --- a/lib/libthr/thread/thr_syscalls.c +++ b/lib/libthr/thread/thr_syscalls.c @@ -584,6 +584,20 @@ __thr_wait6(idtype_t idtype, id_t id, int *status, int options, return (ret); } +static pid_t +__thr_pdwait(int fd, int *status, int options, struct __wrusage *ru, + siginfo_t *infop) +{ + struct pthread *curthread; + pid_t ret; + + curthread = _get_curthread(); + _thr_cancel_enter(curthread); + ret = __sys_pdwait(fd, status, options, ru, infop); + _thr_cancel_leave(curthread, ret == -1); + return (ret); +} + /* * Cancellation behavior: * Thread may be canceled at start, but if the thread wrote some data, @@ -685,6 +699,7 @@ __thr_interpose_libc(void) SLOT(clock_nanosleep); SLOT(pdfork); SLOT(uexterr_gettext); + SLOT(pdwait); #undef SLOT *(__libc_interposing_slot( INTERPOS__pthread_mutex_init_calloc_cb)) = diff --git a/lib/libusb/Makefile b/lib/libusb/Makefile index 4a0c392f0d8f..b3ef5a061584 100644 --- a/lib/libusb/Makefile +++ b/lib/libusb/Makefile @@ -1,5 +1,4 @@ # -# # Makefile for the FreeBSD specific LibUSB 2.0 # @@ -11,7 +10,8 @@ SRCS+= libusb20_desc.c SRCS+= libusb20_ugen20.c INCS+= libusb20.h INCS+= libusb20_desc.h -MAN= libusb.3 libusb20.3 +MAN= libusb.3 libusb20.3 libusb20_be_device_foreach.3 \ + libusb20_dev_open.3 MKLINT= no NOGCCERROR= PTHREAD_LIBS?= -lpthread @@ -229,7 +229,6 @@ MLINKS += libusb20.3 libusb20_dev_set_config_index.3 MLINKS += libusb20.3 libusb20_dev_get_debug.3 MLINKS += libusb20.3 libusb20_dev_get_fd.3 MLINKS += libusb20.3 libusb20_dev_kernel_driver_active.3 -MLINKS += libusb20.3 libusb20_dev_open.3 MLINKS += libusb20.3 libusb20_dev_process.3 MLINKS += libusb20.3 libusb20_dev_request_sync.3 MLINKS += libusb20.3 
libusb20_dev_req_string_sync.3 @@ -261,7 +260,6 @@ MLINKS += libusb20.3 libusb20_be_get_quirk_name.3 MLINKS += libusb20.3 libusb20_be_add_dev_quirk.3 MLINKS += libusb20.3 libusb20_be_remove_dev_quirk.3 MLINKS += libusb20.3 libusb20_be_alloc_default.3 -MLINKS += libusb20.3 libusb20_be_device_foreach.3 MLINKS += libusb20.3 libusb20_be_dequeue_device.3 MLINKS += libusb20.3 libusb20_be_enqueue_device.3 MLINKS += libusb20.3 libusb20_be_free.3 diff --git a/lib/libusb/libusb10.c b/lib/libusb/libusb10.c index e226def0b7b6..b3af017ab980 100644 --- a/lib/libusb/libusb10.c +++ b/lib/libusb/libusb10.c @@ -1259,6 +1259,9 @@ libusb10_isoc_proxy(struct libusb20_transfer *pxfer) libusb20_tr_get_length(pxfer, i); } libusb10_complete_transfer(pxfer, sxfer, LIBUSB_TRANSFER_COMPLETED); + + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; case LIBUSB20_TRANSFER_START: /* setup length(s) */ @@ -1281,6 +1284,9 @@ libusb10_isoc_proxy(struct libusb20_transfer *pxfer) break; default: libusb10_complete_transfer(pxfer, sxfer, libusb10_convert_error(status)); + + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; } } @@ -1322,11 +1328,15 @@ libusb10_bulk_intr_proxy(struct libusb20_transfer *pxfer) } else { libusb10_complete_transfer(pxfer, sxfer, LIBUSB_TRANSFER_COMPLETED); } + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; } /* check for end of data */ if (sxfer->rem_len == 0) { libusb10_complete_transfer(pxfer, sxfer, LIBUSB_TRANSFER_COMPLETED); + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; } /* FALLTHROUGH */ @@ -1353,6 +1363,8 @@ libusb10_bulk_intr_proxy(struct libusb20_transfer *pxfer) default: libusb10_complete_transfer(pxfer, sxfer, libusb10_convert_error(status)); + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; } } @@ -1397,11 +1409,15 @@ libusb10_ctrl_proxy(struct libusb20_transfer *pxfer) } else { libusb10_complete_transfer(pxfer, sxfer, LIBUSB_TRANSFER_COMPLETED); } + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; } /* check for end of data */ if (sxfer->rem_len == 0) { libusb10_complete_transfer(pxfer, sxfer, LIBUSB_TRANSFER_COMPLETED); + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; } /* FALLTHROUGH */ @@ -1442,6 +1458,8 @@ libusb10_ctrl_proxy(struct libusb20_transfer *pxfer) default: libusb10_complete_transfer(pxfer, sxfer, libusb10_convert_error(status)); + /* start next queued transfer, if any */ + libusb10_submit_transfer_sub(libusb20_tr_get_priv_sc0(pxfer), uxfer->endpoint); break; } } diff --git a/lib/libusb/libusb10_hotplug.c b/lib/libusb/libusb10_hotplug.c index 9c46d4926bfa..359c818b5720 100644 --- a/lib/libusb/libusb10_hotplug.c +++ b/lib/libusb/libusb10_hotplug.c @@ -430,5 +430,7 @@ libusb_hotplug_get_user_data(struct libusb_context *ctx, } HOTPLUG_UNLOCK(ctx); - return (handle); + if (handle != NULL) + return (handle->user_data); + return (NULL); } diff --git a/lib/libusb/libusb20_be_device_foreach.3 b/lib/libusb/libusb20_be_device_foreach.3 new file mode 100644 index 000000000000..bc741813ddb0 --- /dev/null +++ 
b/lib/libusb/libusb20_be_device_foreach.3 @@ -0,0 +1,56 @@ +.\" +.\" Copyright (c) 2025 Rick Parrish <unitrunker@unitrunker.net> +.\" +.\" SPDX-License-Identifier: BSD-2-Clause +.\" +.Dd December 14, 2025 +.Dt LIBUSB20 3 +.Os +.Sh NAME +.Nm libusb20_be_device_foreach +.Nd iterate connected USB devices +.Sh SYNOPSIS +.Lb libusb +.In libusb20.h +.Ft struct libusb20_device * +.Fn libusb20_be_device_foreach "struct libusb20_backend *pbe" "struct libusb20_device *pdev" +.Sh DESCRIPTION +The +.Nm +function iterates connected USB devices, one device at a time. +A backend pointer for +.Fa pbe +may be obtained by calling +.Xr libusb20_be_alloc_default 3 . +The starting value of +.Fa pdev +is NULL. +Calling +.Nm libusb20_be_device_foreach +again with +.Fa pdev +set to the return value of the previous call yields the next device. +To begin interacting with a USB device, pass the pointer in a call to +.Xr libusb20_dev_open 3 . +.Sh RETURN VALUES +.Nm +returns NULL for end of list. +Otherwise this is a pointer to the next device. +.Sh EXAMPLES +.Bd -literal + #include <libusb20.h> + struct libusb20_backend *be = libusb20_be_alloc_default(); + struct libusb20_device *device = NULL; + while ( (device = libusb20_be_device_foreach(be, device)) != NULL ) { + if (libusb20_dev_open(device, 0) == LIBUSB20_SUCCESS) { + /* do something */ + libusb20_dev_close(device); + } + } + libusb20_be_free(be); +.Ed +.Sh SEE ALSO +.Xr libusb20 3 , +.Xr libusb20_be_alloc_default 3 , +.Xr libusb20_be_free 3 , +.Xr libusb20_dev_open 3 diff --git a/lib/libusb/libusb20_dev_open.3 b/lib/libusb/libusb20_dev_open.3 new file mode 100644 index 000000000000..fa5d1746ff23 --- /dev/null +++ b/lib/libusb/libusb20_dev_open.3 @@ -0,0 +1,69 @@ +.\" +.\" Copyright (c) 2025 Rick Parrish <unitrunker@unitrunker.net> +.\" +.\" SPDX-License-Identifier: BSD-2-Clause +.\" +.Dd December 14, 2025 +.Dt LIBUSB20 3 +.Os +.Sh NAME +.Nm libusb20_dev_open +.Nd open a USB device to retrieve descriptors and initiate transfers +.Sh SYNOPSIS +.Lb libusb +.In libusb20.h +.Ft int +.Fn libusb20_dev_open "struct libusb20_device *pdev" "uin16_t transfer_max" +.Sh DESCRIPTION +The +.Nm +function opens a USB device to retrieve descriptors and initiate transfers. +.Nm +accepts a pointer as +.Fa pdev +to a libusb20_device obtained from +.Nm libusb20_be_device_foreach . +A zero for +.Fa transfer_max +limits the device to only control transfers. +Call +.Xr libusb20_dev_close 3 +to free resources taken by the open device handle. +.Sh RETURN VALUES +.Nm +returns one of the following to report success or failure: +.Pp +.Bl -tag -width "LIBUSB20_ERROR_NO_DEVICE," -compact +.It Er LIBUSB20_SUCCESS , +The operation succeeds. +.It Er LIBUSB20_ERROR_BUSY , +The device is in use elsewhere. +.It Er LIBUSB20_ERROR_ACCESS , +A permissions issue. +.It Er LIBUSB20_ERROR_NO_DEVICE , +The device detached. +.It Er LIBUSB20_ERROR_NO_MEM +The library couldn't allocate memory. +.El +.Pp +Errors not listed here may be found in +.Xr libusb20 3 +and +.In libusb20.h . 
+.Sh EXAMPLES +.Bd -literal + #include <libusb20.h> + struct libusb20_backend *be = libusb20_be_alloc_default(); + struct libusb20_device *device = NULL; + while ( (device = libusb20_be_device_foreach(be, device)) != NULL ) { + if (libusb20_dev_open(device, 0) == LIBUSB20_SUCCESS) { + /* do something */ + libusb20_dev_close(device); + } + } + libusb20_be_free(be); +.Ed +.Sh SEE ALSO +.Xr libusb20 3 , +.Xr libusb20_be_device_foreach 3 , +.Xr libusb20_dev_close 3 diff --git a/lib/libutil/login.conf.5 b/lib/libutil/login.conf.5 index 942f3ecd2661..d4bbc1d67780 100644 --- a/lib/libutil/login.conf.5 +++ b/lib/libutil/login.conf.5 @@ -17,7 +17,7 @@ .\" 5. Modifications may be freely made to this file providing the above .\" conditions are met. .\" -.Dd September 25, 2025 +.Dd December 15, 2025 .Dt LOGIN.CONF 5 .Os .Sh NAME @@ -288,7 +288,6 @@ explicitly indicates not to change the umask. .Bl -column passwd_prompt indent indent .It Sy "Name Type Default Description" .\" .It "approve program Program to approve login. -.It "copyright file File containing additional copyright information" .It "host.allow list List of remote host wildcards from which users in" the class may access. .It "host.deny list List of remote host wildcards from which users" diff --git a/lib/libutil/trimdomain.3 b/lib/libutil/trimdomain.3 index 0e65a864936c..114d8d139869 100644 --- a/lib/libutil/trimdomain.3 +++ b/lib/libutil/trimdomain.3 @@ -22,7 +22,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd April 7, 1999 +.Dd February 17, 2025 .Dt TRIMDOMAIN 3 .Os .Sh NAME @@ -60,20 +60,18 @@ name does not exceed .Pp If the passed .Ar fullname -is actually a -.Dv DISPLAY +is actually an X11 +.Ev DISPLAY specification of the form .Sm off -.Ar host . domain : nn Oo . -.Ar nn -.Oc +.Ar host . Ar domain : Ar display Op . screen , .Sm on and the domain name is the same as the local domain name, .Fn trimdomain will remove the embedded domain name, copying the screen and display numbers to the end of the base host name and resulting in .Sm off -.Ar host : nn Op . Ar nn . +.Ar host : display Op . Ar screen . 
.Sm on .Sh RETURN VALUES The diff --git a/lib/msun/powerpc/fenv.c b/lib/msun/powerpc/fenv.c index da11d27a649f..cc0b518bb7c4 100644 --- a/lib/msun/powerpc/fenv.c +++ b/lib/msun/powerpc/fenv.c @@ -28,27 +28,17 @@ #define __fenv_static #include "fenv.h" -#ifdef __SPE__ -#include <sys/types.h> -#include <machine/spr.h> -#endif #ifdef __GNUC_GNU_INLINE__ #error "This file must be compiled with C99 'inline' semantics" #endif -#ifdef __SPE__ -const fenv_t __fe_dfl_env = SPEFSCR_DFLT; -#else const fenv_t __fe_dfl_env = 0x00000000; -#endif extern inline int feclearexcept(int __excepts); extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts); extern inline int fesetexceptflag(const fexcept_t *__flagp, int __excepts); -#ifndef __SPE__ extern inline int feraiseexcept(int __excepts); -#endif extern inline int fetestexcept(int __excepts); extern inline int fegetround(void); extern inline int fesetround(int __round); @@ -58,27 +48,3 @@ extern inline int fesetenv(const fenv_t *__envp); extern inline int feupdateenv(const fenv_t *__envp); extern inline int feenableexcept(int __mask); extern inline int fedisableexcept(int __mask); - -#ifdef __SPE__ -#define PMAX 0x7f7fffff -#define PMIN 0x00800000 -int feraiseexcept(int __excepts) -{ - uint32_t spefscr; - - spefscr = mfspr(SPR_SPEFSCR); - mtspr(SPR_SPEFSCR, spefscr | (__excepts & FE_ALL_EXCEPT)); - - if (__excepts & FE_INVALID) - __asm __volatile ("efsdiv %0, %0, %1" :: "r"(0), "r"(0)); - if (__excepts & FE_DIVBYZERO) - __asm __volatile ("efsdiv %0, %0, %1" :: "r"(1.0f), "r"(0)); - if (__excepts & FE_UNDERFLOW) - __asm __volatile ("efsmul %0, %0, %0" :: "r"(PMIN)); - if (__excepts & FE_OVERFLOW) - __asm __volatile ("efsadd %0, %0, %0" :: "r"(PMAX)); - if (__excepts & FE_INEXACT) - __asm __volatile ("efssub %0, %0, %1" :: "r"(PMIN), "r"(1.0f)); - return (0); -} -#endif diff --git a/lib/msun/powerpc/fenv.h b/lib/msun/powerpc/fenv.h index 49fffb196206..09fd4caafb43 100644 --- a/lib/msun/powerpc/fenv.h +++ b/lib/msun/powerpc/fenv.h @@ -40,17 +40,6 @@ typedef __uint32_t fenv_t; typedef __uint32_t fexcept_t; /* Exception flags */ -#ifdef __SPE__ -#define FE_OVERFLOW 0x00000100 -#define FE_UNDERFLOW 0x00000200 -#define FE_DIVBYZERO 0x00000400 -#define FE_INVALID 0x00000800 -#define FE_INEXACT 0x00001000 - -#define FE_ALL_INVALID FE_INVALID - -#define _FPUSW_SHIFT 6 -#else #define FE_INEXACT 0x02000000 #define FE_DIVBYZERO 0x04000000 #define FE_UNDERFLOW 0x08000000 @@ -78,7 +67,6 @@ typedef __uint32_t fexcept_t; FE_VXSNAN | FE_INVALID) #define _FPUSW_SHIFT 22 -#endif #define FE_ALL_EXCEPT (FE_DIVBYZERO | FE_INEXACT | \ FE_ALL_INVALID | FE_OVERFLOW | FE_UNDERFLOW) @@ -101,17 +89,10 @@ extern const fenv_t __fe_dfl_env; FE_OVERFLOW | FE_UNDERFLOW) >> _FPUSW_SHIFT) #ifndef _SOFT_FLOAT -#ifdef __SPE__ -#define __mffs(__env) \ - __asm __volatile("mfspr %0, 512" : "=r" ((__env)->__bits.__reg)) -#define __mtfsf(__env) \ - __asm __volatile("mtspr 512,%0;isync" :: "r" ((__env).__bits.__reg)) -#else #define __mffs(__env) \ __asm __volatile("mffs %0" : "=f" ((__env)->__d)) #define __mtfsf(__env) \ __asm __volatile("mtfsf 255,%0" :: "f" ((__env).__d)) -#endif #else #define __mffs(__env) #define __mtfsf(__env) @@ -167,9 +148,6 @@ fesetexceptflag(const fexcept_t *__flagp, int __excepts) return (0); } -#ifdef __SPE__ -extern int feraiseexcept(int __excepts); -#else __fenv_static inline int feraiseexcept(int __excepts) { @@ -182,7 +160,6 @@ feraiseexcept(int __excepts) __mtfsf(__r); return (0); } -#endif __fenv_static inline int fetestexcept(int __excepts) diff --git 
a/lib/ncurses/tinfo/Makefile b/lib/ncurses/tinfo/Makefile index 08c2311cd7a9..ab04e518fa19 100644 --- a/lib/ncurses/tinfo/Makefile +++ b/lib/ncurses/tinfo/Makefile @@ -16,6 +16,7 @@ NCURSES_CH_T= cchar_t NCURSES_OK_WCHAR_T= 1 NEED_WCHAR_H= 1 NCURSES_EXT_COLORS= 1 +NCURSES_RGB_COLORS= 0 NCURSES_EXT_FUNCS= 1 NCURSES_CONST= const NCURSES_INLINE= inline @@ -49,7 +50,7 @@ HAVE_TERMIOS_H= 1 HAVE_TERMIO_H= 0 HAVE_VSSCANF= 1 HAVE_STDINT_H= 1 -HEADER_STDBOOL= 1 +HAVE_STDBOOL_H= 1 HAVE_STDNORETURN_H= 0 ENABLE_SIGWINCH= 1 # XXX amd64 1L and int @@ -193,14 +194,6 @@ SYMLINKS+= libtinfow.so ${LIBDIR}/libtermlib.so SYMLINKS+= libtinfow.so ${LIBDIR}/libtinfo.so .endif -DOCSDIR= ${SHAREDIR}/doc/ncurses -DOCS= ncurses-intro.html hackguide.html - -.if ${MK_HTML} != "no" -.PATH: ${NCURSES_DIR}/doc/html -FILESGROUPS= DOCS -.endif - # Generated source .ORDER: names.c codes.c @@ -357,6 +350,7 @@ curses.head: curses.h.in -e "/@NCURSES_OPAQUE_MENU@/s%%${NCURSES_OPAQUE_MENU}%" \ -e "/@NCURSES_OPAQUE_PANEL@/s%%${NCURSES_OPAQUE_PANEL}%" \ -e "/@NCURSES_PATCH@/s%%${NCURSES_PATCH}%" \ + -e "/@NCURSES_RGB_COLORS@/s%%${NCURSES_RGB_COLORS}%" \ -e "/@NCURSES_SIZE_T@/s%%${NCURSES_SIZE_T}%" \ -e "/@NCURSES_TPARM_VARARGS@/s%%${NCURSES_TPARM_VARARGS}%" \ -e "/@NCURSES_WATTR_MACROS@/s%%${NCURSES_WATTR_MACROS}%"\ @@ -369,15 +363,15 @@ curses.head: curses.h.in -e "/@NCURSES_SP_FUNCS@/s%%${NCURSES_SP_FUNCS}%g" \ -e "/@NEED_WCHAR_H@/s%%${NEED_WCHAR_H}%" \ -e "/@USE_CXX_BOOL@/s%%${USE_CXX_BOOL}%" \ + -e "/@USE_STDBOOL_H@/s%%${HAVE_STDBOOL_H}%" \ -e "/@GENERATED_EXT_FUNCS@/s%%generated%g" \ -e "s%@NCURSES_CCHARW_MAX@%5%g" \ -e "s%@cf_cv_1UL@%${ONEUL}%g" \ - -e "s%@cf_cv_builtin_bool@%${BUILTIN_BOOL}%g" \ + -e "s%@USE_BUILTIN_BOOL@%${BUILTIN_BOOL}%g" \ -e "s%@cf_cv_enable_lp64@%${ENABLE_LP64}%g" \ -e "s%@cf_cv_enable_opaque@%${ENABLE_OPAQUE}%g" \ -e "s%@cf_cv_enable_reentrant@%${ENABLE_REENTRANT}%g" \ -e "s%@cf_cv_enable_sigwinch@%${ENABLE_SIGWINCH}%g" \ - -e "s%@cf_cv_header_stdbool_h@%${HEADER_STDBOOL}%g" \ -e "s%@cf_cv_type_of_bool@%${TYPE_OF_BOOL}%g" \ -e "s%@cf_cv_typeof_chtype@%${TYPEOF_CHTYPE}%g" \ -e "s%@cf_cv_typeof_mmask_t@%${TYPEOF_MMASK_T}%g" \ diff --git a/lib/ncurses/tinfo/ncurses_cfg.h b/lib/ncurses/tinfo/ncurses_cfg.h index e26aa49763f0..f231f3b96a05 100644 --- a/lib/ncurses/tinfo/ncurses_cfg.h +++ b/lib/ncurses/tinfo/ncurses_cfg.h @@ -1,6 +1,6 @@ /* include/ncurses_cfg.h. Generated automatically by configure. */ /**************************************************************************** - * Copyright 2020 Thomas E. Dickey * + * Copyright 2020,2024 Thomas E. Dickey * * Copyright 1998-2016,2017 Free Software Foundation, Inc. * * * * Permission is hereby granted, free of charge, to any person obtaining a * @@ -32,7 +32,7 @@ * Author: Thomas E. Dickey 1997-on * ****************************************************************************/ /* - * $Id: ncurses_cfg.hin,v 1.13 2020/03/08 12:37:59 tom Exp $ + * $Id: ncurses_cfg.hin,v 1.15 2024/06/08 14:04:14 tom Exp $ * * Both ncurses_cfg.h and ncurses_def.h are internal header-files used when * building ncurses. 
@@ -45,14 +45,14 @@ * * See: * https://invisible-island.net/autoconf/ - * ftp://ftp.invisible-island.net/autoconf/ + * https://invisible-island.net/archives/autoconf/ */ #ifndef NC_CONFIG_H #define NC_CONFIG_H #define PACKAGE "ncurses" -#define NCURSES_VERSION "6.5" -#define NCURSES_PATCHDATE 20240427 +#define NCURSES_VERSION "6.6" +#define NCURSES_PATCHDATE 20251230 #define SYSTEM_NAME "FreeBSD" #if 0 #include <stdlib.h> @@ -60,9 +60,6 @@ #define HAVE_LONG_FILE_NAMES 1 #define MIXEDCASE_FILENAMES 1 #define STDC_HEADERS 1 -#define USE_GETCAP 1 -#define BSD_TPUTS 1 -#define HAVE_BSD_CGETENT 1 #define HAVE_SYS_TYPES_H 1 #define HAVE_SYS_STAT_H 1 #define HAVE_STDLIB_H 1 @@ -78,6 +75,7 @@ #define TERMINFO "/usr/share/terminfo" #define HAVE_BIG_CORE 1 #define TERMPATH "/etc/termcap:/usr/share/misc/termcap" +#define USE_GETCAP 1 #define USE_HOME_TERMINFO 1 #define USE_ROOT_ENVIRON 1 #define USE_ROOT_ACCESS 1 @@ -88,6 +86,7 @@ #define HAVE_LINK 1 #define HAVE_SYMLINK 1 #define USE_LINKS 1 +#define BSD_TPUTS 1 #define HAVE_LANGINFO_CODESET 1 #define USE_WIDEC_SUPPORT 1 #define NCURSES_WIDECHAR 1 @@ -106,6 +105,7 @@ #define HAVE_MBSRTOWCS 1 #define HAVE_WCSTOMBS 1 #define HAVE_MBSTOWCS 1 +#define HAVE_WCWIDTH 1 #define NEED_WCHAR_H 1 #define HAVE_FSEEKO 1 #define RGB_PATH "no" @@ -134,6 +134,7 @@ #define NCURSES_WRAP_PREFIX "_nc_" #define USE_ASSUMED_COLOR 1 #define USE_HASHMAP 1 +#define USE_COLORFGBG 1 #define GCC_SCANF 1 #define GCC_SCANFLIKE(fmt,var) __attribute__((format(scanf,fmt,var))) #define GCC_PRINTF 1 @@ -159,7 +160,6 @@ #define HAVE_SYS_TIMES_H 1 #define HAVE_UNISTD_H 1 #define HAVE_WCTYPE_H 1 -#define HAVE_UNISTD_H 1 #define HAVE_GETOPT_H 1 #define HAVE_GETOPT_HEADER 1 #define DECL_ENVIRON 1 @@ -169,7 +169,6 @@ #define HAVE_STRDUP 1 #define HAVE_SYS_TIME_SELECT 1 #define SIG_ATOMIC_T volatile sig_atomic_t -#define HAVE_ERRNO 1 #define HAVE_CLOCK_GETTIME 1 #define HAVE_FPATHCONF 1 #define HAVE_GETCWD 1 @@ -197,6 +196,7 @@ #define HAVE_VSNPRINTF 1 #define HAVE_PATH_TTYS 1 #define HAVE_GETTTYNAM 1 +#define HAVE_BSD_CGETENT 1 #define HAVE_ISASCII 1 #define HAVE_NANOSLEEP 1 #define HAVE_TERMIOS_H 1 @@ -216,8 +216,15 @@ #define HAVE_WORKING_VFORK 1 #define HAVE_WORKING_FORK 1 #define USE_FOPEN_BIN_R 1 +#define HAVE_CC_T 1 +#define HAVE_SPEED_T 1 +#define HAVE_TCFLAG_T 1 +#define HAVE_SIGSET_T 1 #define USE_OPENPTY_HEADER <libutil.h> #define USE_XTERM_PTY 1 +#define USE_STDBOOL_H 1 +#define HAVE_NEW 1 +#define HAVE_EXCEPTION 1 #define HAVE_TYPEINFO 1 #define HAVE_IOSTREAM 1 #define IOSTREAM_NAMESPACE 1 @@ -233,7 +240,7 @@ #define HAVE_FORM_H 1 #define HAVE_LIBFORM 1 #define NCURSES_PATHSEP ':' -#define NCURSES_VERSION_STRING "6.5.20240427" +#define NCURSES_VERSION_STRING "6.6.20251230" #define NCURSES_OSPEED_COMPAT 1 #include <ncurses_def.h> diff --git a/lib/virtual_oss/Makefile b/lib/virtual_oss/Makefile index dc83edd4b980..e6cb5fbe5985 100644 --- a/lib/virtual_oss/Makefile +++ b/lib/virtual_oss/Makefile @@ -1,9 +1,8 @@ .include <src.opts.mk> -SHLIBDIR?= ${LIBDIR}/virtual_oss - SUBDIR+= null \ oss -.include "Makefile.inc" +SUBDIR_PARALLEL= + .include <bsd.subdir.mk> diff --git a/lib/virtual_oss/Makefile.inc b/lib/virtual_oss/Makefile.inc index 877465a5c548..b3f00b075640 100644 --- a/lib/virtual_oss/Makefile.inc +++ b/lib/virtual_oss/Makefile.inc @@ -1,5 +1,9 @@ PACKAGE= sound -.include "../Makefile.inc" +SHLIBDIR= ${LIBDIR}/virtual_oss + +CFLAGS+= -I${SRCTOP}/usr.sbin/virtual_oss/virtual_oss +# XXX virtual_oss/int.h includes samplerate.h, but the library isn't used +CFLAGS+= 
-I${SYSROOT:U${DESTDIR}}/${INCLUDEDIR}/private/samplerate -LDFLAGS+= -L${.OBJDIR:H:H}/libsamplerate +.include "../Makefile.inc" diff --git a/lib/virtual_oss/bt/Makefile b/lib/virtual_oss/bt/Makefile index 15413b7a1f1e..f7c4077eb8f4 100644 --- a/lib/virtual_oss/bt/Makefile +++ b/lib/virtual_oss/bt/Makefile @@ -1,14 +1,10 @@ SHLIB_NAME= voss_bt.so -SHLIBDIR= ${LIBDIR}/virtual_oss SRCS= bt.c \ avdtp.c \ sbc_encode.c -CFLAGS+= -I${SRCTOP}/usr.sbin/virtual_oss/virtual_oss \ - -I${SRCTOP}/contrib/libsamplerate -LDFLAGS+= -lbluetooth -lsdp -LIBADD= samplerate +LIBADD= bluetooth sdp .if defined(HAVE_LIBAV) CFLAGS+= -I${LOCALBASE:U/usr/local}/include -DHAVE_LIBAV diff --git a/lib/virtual_oss/null/Makefile b/lib/virtual_oss/null/Makefile index ec5c2d40f665..a5a8d4ec6a24 100644 --- a/lib/virtual_oss/null/Makefile +++ b/lib/virtual_oss/null/Makefile @@ -1,10 +1,5 @@ SHLIB_NAME= voss_null.so -SHLIBDIR= ${LIBDIR}/virtual_oss SRCS= null.c -CFLAGS+= -I${SRCTOP}/usr.sbin/virtual_oss/virtual_oss \ - -I${SRCTOP}/contrib/libsamplerate -LIBADD= samplerate - .include <bsd.lib.mk> diff --git a/lib/virtual_oss/oss/Makefile b/lib/virtual_oss/oss/Makefile index 257d7f0c0bae..795d9c45469d 100644 --- a/lib/virtual_oss/oss/Makefile +++ b/lib/virtual_oss/oss/Makefile @@ -1,10 +1,5 @@ SHLIB_NAME= voss_oss.so -SHLIBDIR= ${LIBDIR}/virtual_oss SRCS= oss.c -CFLAGS+= -I${SRCTOP}/usr.sbin/virtual_oss/virtual_oss \ - -I${SRCTOP}/contrib/libsamplerate -LIBADD= samplerate - .include <bsd.lib.mk> diff --git a/lib/virtual_oss/sndio/Makefile b/lib/virtual_oss/sndio/Makefile index 9b5af63a3246..59a19aeb2ba5 100644 --- a/lib/virtual_oss/sndio/Makefile +++ b/lib/virtual_oss/sndio/Makefile @@ -1,12 +1,8 @@ SHLIB_NAME= voss_sndio.so -SHLIBDIR= ${LIBDIR}/virtual_oss SRCS= sndio.c -CFLAGS+= -I${SRCTOP}/usr.sbin/virtual_oss/virtual_oss \ - -I${SRCTOP}/contrib/libsamplerate \ - -I${LOCALBASE:U/usr/local}/include +CFLAGS+= -I${LOCALBASE:U/usr/local}/include LDFLAGS+= -L${LOCALBASE:U/usr/local}/lib -lsndio -LIBADD= samplerate .include <bsd.lib.mk> |
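Returning to the pthread_tryjoin_np() interface added in lib/libthr above, a minimal polling sketch (assuming the new FBSD_1.9 symbol is available; the worker body and poll interval are placeholders):

    #include <err.h>
    #include <errno.h>
    #include <pthread.h>
    #include <pthread_np.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *
    worker(void *arg)
    {
            sleep(1);               /* simulate some work */
            return (arg);
    }

    int
    main(void)
    {
            pthread_t thr;
            void *res;
            int error;

            error = pthread_create(&thr, NULL, worker, (void *)1);
            if (error != 0)
                    errc(1, error, "pthread_create");

            /* Poll for completion instead of blocking in pthread_join(). */
            while ((error = pthread_tryjoin_np(thr, &res)) == EBUSY)
                    usleep(10000);
            if (error != 0)
                    errc(1, error, "pthread_tryjoin_np");
            printf("worker returned %p\n", res);
            return (0);
    }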
