diff options
391 files changed, 17441 insertions, 3681 deletions
diff --git a/Makefile.inc1 b/Makefile.inc1 index 6c126f01973f..9c6ca7cbc882 100644 --- a/Makefile.inc1 +++ b/Makefile.inc1 @@ -3071,7 +3071,7 @@ _lld= usr.bin/clang/lld ${MK_LLVM_BINUTILS_BOOTSTRAP} != "no" _clang_libs= lib/clang .endif -.if ${MK_LLVM_BINUTILS_BOOTSTRAP}} != "no" +.if ${MK_LLVM_BINUTILS_BOOTSTRAP} != "no" _llvm_binutils= usr.bin/clang/llvm-ar \ usr.bin/clang/llvm-nm \ usr.bin/clang/llvm-objcopy \ diff --git a/ObsoleteFiles.inc b/ObsoleteFiles.inc index 1b5fe1dec9b3..0f8fd14e2c44 100644 --- a/ObsoleteFiles.inc +++ b/ObsoleteFiles.inc @@ -51,6 +51,9 @@ # xargs -n1 | sort | uniq -d; # done +# 20260324: test file renamed +OLD_FILES+=usr/tests/sys/netinet/tcp_implied_connect + # 20260302: Remove obsolete le(4) ethernet driver OLD_FILES+=usr/share/man/man4/le.4.gz diff --git a/contrib/file/magic/Magdir/filesystems b/contrib/file/magic/Magdir/filesystems index a15e5e74d971..78e9a31bd0e7 100644 --- a/contrib/file/magic/Magdir/filesystems +++ b/contrib/file/magic/Magdir/filesystems @@ -1640,6 +1640,11 @@ >1112 lelong x pending inodes to free %d, >712 lequad x system-wide uuid %0llx, >60 lelong x minimum percentage of free blocks %d, +>1312 lelong&0xa 0x2 soft updates, +>1312 lelong&0xa 0xa soft updates journaling, +>1312 lelong&0x40 0x40 GEOM journaling, +>1312 lelong&0x10 0x10 POSIX.1e ACLs, +>1312 lelong&0x100 0x100 NFSv4 ACLs, >128 lelong 0 TIME optimization >128 lelong 1 SPACE optimization diff --git a/contrib/libcbor/.circleci/config.yml b/contrib/libcbor/.circleci/config.yml index 4391ad8d1e9c..ea030d6b7b8e 100644 --- a/contrib/libcbor/.circleci/config.yml +++ b/contrib/libcbor/.circleci/config.yml @@ -3,30 +3,50 @@ version: 2.1 commands: linux-setup: steps: - - run: sudo apt-get update - # NEEDRESTART_MODE prevents automatic restarts which seem to hang. 
- - run: sudo NEEDRESTART_MODE=l apt-get install -y cmake ${TOOLCHAIN_PACKAGES} - - run: sudo NEEDRESTART_MODE=l apt-get install -y libcmocka-dev libcjson-dev + - run: sudo apt-get update + # NEEDRESTART_MODE prevents automatic restarts which seem to hang. + - run: sudo NEEDRESTART_MODE=l apt-get install -y cmake ${TOOLCHAIN_PACKAGES} + - run: sudo NEEDRESTART_MODE=l apt-get install -y libcmocka-dev libcjson-dev + build-with-cmocka-from-source: + # For whatever reason, cmocka find stopped working on dockercross at some point. + steps: + - run: git clone https://git.cryptomilk.org/projects/cmocka.git ~/cmocka + - run: > + cd $(mktemp -d /tmp/build.XXXX) && + cmake ~/cmocka && + make && + sudo make install + - run: > + cmake -DWITH_TESTS=ON \ + -DWITH_EXAMPLES=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DSANITIZE=OFF \ + -DCOVERAGE="${CMAKE_COVERAGE:='OFF'}" \ + -DCMOCKA_INCLUDE_DIR=/usr/local/include \ + -DCMOCKA_LIBRARIES=/usr/local/lib/libcmocka.so + - run: make -j 16 VERBOSE=1 build: steps: - - run: > - cmake -DWITH_TESTS=ON \ - -DWITH_EXAMPLES=ON \ - -DCMAKE_BUILD_TYPE=Debug \ - -DSANITIZE=OFF \ - -DCOVERAGE="${CMAKE_COVERAGE:='OFF'}" \ - . - - run: make -j 16 VERBOSE=1 + - run: > + cmake -DWITH_TESTS=ON \ + -DWITH_EXAMPLES=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DSANITIZE=OFF \ + -DCOVERAGE="${CMAKE_COVERAGE:='OFF'}" \ + . + - run: make -j 16 VERBOSE=1 build-release: steps: - - run: > - cmake -DWITH_TESTS=ON \ - -DCMAKE_BUILD_TYPE=Release \ - . - - run: make -j 16 VERBOSE=1 + - run: > + cmake -DWITH_TESTS=ON \ + -DCMAKE_BUILD_TYPE=Release \ + . 
+ - run: make -j 16 VERBOSE=1 test: steps: - - run: ctest -VV + - run: ctest -VV --output-junit ctest_out.xml + - store_test_results: + path: ctest_out.xml orbs: codecov: codecov/codecov@3.2.2 @@ -34,7 +54,7 @@ orbs: jobs: static-test: machine: &default-machine - image: ubuntu-2204:2023.07.2 + image: ubuntu-2204:current environment: TOOLCHAIN_PACKAGES: g++ steps: @@ -159,7 +179,7 @@ jobs: build-bazel: machine: - image: ubuntu-2204:2023.07.2 + <<: *default-machine environment: TOOLCHAIN_PACKAGES: g++ steps: @@ -178,7 +198,7 @@ jobs: build-and-test-osx: macos: - xcode: 12.5.1 + xcode: 16.0.0 steps: - checkout - run: bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" @@ -203,7 +223,33 @@ jobs: - run: /c/Program\ Files/Cmake/bin/cmake --build libcbor_build - run: > export PATH="$(pwd)/cmocka_build/src/Debug/:$PATH" && - /c/Program\ Files/Cmake/bin/ctest.exe --test-dir libcbor_build --output-on-failure + /c/Program\ Files/Cmake/bin/ctest.exe --test-dir libcbor_build -C Debug --output-on-failure + + build-and-test-mips: &dockcross-job + docker: + - image: dockcross/linux-mips-lts + steps: + - checkout + - attach_workspace: + at: /home/circleci/project + - build-with-cmocka-from-source + - test + + build-and-test-mipsel: + <<: *dockcross-job + docker: + - image: dockcross/linux-mipsel-lts + + + build-and-test-riscv64: &dockcross-job + docker: + - image: dockcross/linux-riscv64 + steps: + - checkout + - attach_workspace: + at: /home/circleci/project + - build-with-cmocka-from-source + - test workflows: build-and-test: @@ -215,6 +261,9 @@ workflows: - build-and-test-release-clang - build-and-test-arm - build-and-test-win + - build-and-test-mips + - build-and-test-mipsel + - build-and-test-riscv64 - build-bazel - llvm-coverage # OSX builds are expensive, run only on master diff --git a/contrib/libcbor/.cirrus.yml b/contrib/libcbor/.cirrus.yml index 948ae23b4a98..beaea2a0b6ee 100644 --- a/contrib/libcbor/.cirrus.yml +++ 
b/contrib/libcbor/.cirrus.yml @@ -5,7 +5,6 @@ freebsd_task: - mkdir build - cd build - cmake -GNinja -DWITH_TESTS=ON - -DCBOR_CUSTOM_ALLOC=ON -DCMAKE_BUILD_TYPE=Debug -DSANITIZE=OFF .. @@ -15,12 +14,12 @@ freebsd_task: - ctest -VV matrix: # From gcloud compute images list --project freebsd-org-cloud-dev --no-standard-images - - name: freebsd-13-2 + - name: freebsd-13-4 freebsd_instance: - image_family: freebsd-13-2 - - name: freebsd-14-0 + image_family: freebsd-13-4 + - name: freebsd-14-2 freebsd_instance: - image_family: freebsd-14-0 + image_family: freebsd-14-2 - name: freebsd-15-0-snap freebsd_instance: image_family: freebsd-15-0-snap diff --git a/contrib/libcbor/.clang-format b/contrib/libcbor/.clang-format index de0dc5e77907..09a8bc907ac8 100644 --- a/contrib/libcbor/.clang-format +++ b/contrib/libcbor/.clang-format @@ -52,7 +52,7 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true -DerivePointerAlignment: true +DerivePointerAlignment: false DisableFormat: false ExperimentalAutoDetectBinPacking: false FixNamespaceComments: true diff --git a/contrib/libcbor/.github/workflows/fuzz-pr.yml b/contrib/libcbor/.github/workflows/fuzz-pr.yml index 0e3c4fde4e02..aba933e193f3 100644 --- a/contrib/libcbor/.github/workflows/fuzz-pr.yml +++ b/contrib/libcbor/.github/workflows/fuzz-pr.yml @@ -18,7 +18,7 @@ jobs: fuzz-seconds: 10 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v4 if: failure() with: name: artifacts diff --git a/contrib/libcbor/.github/workflows/fuzz.yml b/contrib/libcbor/.github/workflows/fuzz.yml index 8603cc2f338e..8a95cdff76db 100644 --- a/contrib/libcbor/.github/workflows/fuzz.yml +++ b/contrib/libcbor/.github/workflows/fuzz.yml @@ -18,7 +18,7 @@ jobs: fuzz-seconds: 14400 # 4 hours dry-run: false - name: Upload Crash - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v4 if: failure() with: 
name: artifacts diff --git a/contrib/libcbor/.gitignore b/contrib/libcbor/.gitignore index 61f88a874965..cfc2f906bd0a 100644 --- a/contrib/libcbor/.gitignore +++ b/contrib/libcbor/.gitignore @@ -1,4 +1,3 @@ -build *~ nbproject .idea @@ -7,10 +6,13 @@ doxygen_docs cmake-build-debug venv **.DS_Store -.vscode +.vscode/tmp +.vscode/c_cpp_properties.json +doc/build # No top-level requirements, see doc/source requirements.txt examples/bazel/bazel-bazel examples/bazel/bazel-bin examples/bazel/bazel-out examples/bazel/bazel-testlogs +**MODULE.bazel.lock diff --git a/contrib/libcbor/.vscode/settings.json b/contrib/libcbor/.vscode/settings.json new file mode 100644 index 000000000000..1efd1526d101 --- /dev/null +++ b/contrib/libcbor/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "C_Cpp.clang_format_style": "file", + "editor.formatOnSave": true, + "cmake.configureOnOpen": true, + "cmake.buildDirectory": "${workspaceFolder}/.vscode/tmp/build_${buildType}", +}
\ No newline at end of file diff --git a/contrib/libcbor/BUILD b/contrib/libcbor/BUILD new file mode 100644 index 000000000000..5035c90f756b --- /dev/null +++ b/contrib/libcbor/BUILD @@ -0,0 +1,58 @@ +genrule( + name = "cbor_cmake", + srcs = glob(["**"]), + outs = [ + "libcbor.a", + "cbor.h", + "cbor/arrays.h", + "cbor/bytestrings.h", + "cbor/callbacks.h", + "cbor/cbor_export.h", + "cbor/common.h", + "cbor/configuration.h", + "cbor/data.h", + "cbor/encoding.h", + "cbor/floats_ctrls.h", + "cbor/ints.h", + "cbor/maps.h", + "cbor/serialization.h", + "cbor/streaming.h", + "cbor/strings.h", + "cbor/tags.h", + ], + cmd = " && ".join([ + # Remember where output should go. + "INITIAL_WD=`pwd`", + "cd `dirname $(location CMakeLists.txt)`", + "cmake -DCMAKE_BUILD_TYPE=Release .", + "cmake --build .", + # Export the .a and .h files for cbor rule, below. + "cp -R src/* $$INITIAL_WD/$(RULEDIR)", + "cp cbor/configuration.h $$INITIAL_WD/$(RULEDIR)/cbor", + ]), + visibility = ["//visibility:private"], +) + +cc_import( + name = "cbor", + hdrs = [ + "cbor.h", + "cbor/arrays.h", + "cbor/bytestrings.h", + "cbor/callbacks.h", + "cbor/cbor_export.h", + "cbor/common.h", + "cbor/configuration.h", + "cbor/data.h", + "cbor/encoding.h", + "cbor/floats_ctrls.h", + "cbor/ints.h", + "cbor/maps.h", + "cbor/serialization.h", + "cbor/streaming.h", + "cbor/strings.h", + "cbor/tags.h", + ], + static_library = "libcbor.a", + visibility = ["//visibility:public"], +) diff --git a/contrib/libcbor/Bazel.md b/contrib/libcbor/Bazel.md deleted file mode 100644 index 9fa2081f58fc..000000000000 --- a/contrib/libcbor/Bazel.md +++ /dev/null @@ -1,100 +0,0 @@ -# Use as a Bazel Dependency - -To use libcbor in your -[Baze](https://bazel.build/) -project, first add the following section to your project's `WORKSPACE` file. -Note the location of the `third_party/libcbor.BUILD` file - you may use a -different location if you wish, but you the file must be make available to -`WORKSPACE`. 
- -## WORKSPACE - -Note, this imports version `0.8.0` - you may need to update the version and -the sha256 hash. - -```python -# libcbor -http_archive( - name = "libcbor", - build_file = "//third_party:libcbor.BUILD", - sha256 = "dd04ea1a7df484217058d389e027e7a0143a4f245aa18a9f89a5dd3e1a4fcc9a", - strip_prefix = "libcbor-0.8.0", - urls = ["https://github.com/PJK/libcbor/archive/refs/tags/v0.8.0.zip"], -) -``` - -## third_party/libcbor.BUILD - -Bazel will unzip the libcbor zip file, then copy this file in as `BUILD`. -Bazel will then use this file to compile libcbor. -[Cmake](https://cmake.org/) -is used in two passes: to create the Makefiles, and then to invoke Make to build -the `libcbor.a` static library. `libcbor.a` and the `.h` files are then made -available for other packages to use. - -```python -genrule( - name = "cbor_cmake", - srcs = glob(["**"]), - outs = ["libcbor.a", "cbor.h", "cbor/arrays.h", "cbor/bytestrings.h", - "cbor/callbacks.h", "cbor/cbor_export.h", "cbor/common.h", "cbor/configuration.h", "cbor/data.h", - "cbor/encoding.h", "cbor/floats_ctrls.h", "cbor/ints.h", "cbor/maps.h", - "cbor/serialization.h", "cbor/streaming.h", "cbor/strings.h", "cbor/tags.h"], - cmd = " && ".join([ - # Remember where output should go. - "INITIAL_WD=`pwd`", - # Build libcbor library. - "cd `dirname $(location CMakeLists.txt)`", - "cmake -DCMAKE_BUILD_TYPE=Release .", - "cmake --build .", - # Export the .a and .h files for cbor rule, below. 
- "cp src/libcbor.a src/cbor.h $$INITIAL_WD/$(RULEDIR)", - "cp src/cbor/*h cbor/configuration.h $$INITIAL_WD/$(RULEDIR)/cbor"]), - visibility = ["//visibility:private"], -) - -cc_import( - name = "cbor", - hdrs = ["cbor.h", "cbor/arrays.h", "cbor/bytestrings.h", - "cbor/callbacks.h", "cbor/cbor_export.h", "cbor/common.h", "cbor/configuration.h", "cbor/data.h", - "cbor/encoding.h", "cbor/floats_ctrls.h", "cbor/ints.h", "cbor/maps.h", - "cbor/serialization.h", "cbor/streaming.h", "cbor/strings.h", "cbor/tags.h"], - static_library = "libcbor.a", - visibility = ["//visibility:public"], -) -``` - -## third_party/BUILD - -The `libcbor.BUILD` file must be make available to the top-level `WORKSPACE` -file: - -```python -exports_files(["libcbor.BUILD"])) -``` - -## Your BUILD File - -Add libcbor dependency to your package's `BUILD` file like so: - -```python -cc_library( - name = "...", - srcs = [ ... ], - hdrs = [ ... ], - deps = [ - ... - "@libcbor//:cbor", - ], -) -``` - -## Your C File - -Now you may simply include `cbor.h`: - -```c -#include "cbor.h" - -static const uint8_t version = cbor_major_version; -``` diff --git a/contrib/libcbor/CHANGELOG.md b/contrib/libcbor/CHANGELOG.md index 3c331f9266ab..7509569b414f 100644 --- a/contrib/libcbor/CHANGELOG.md +++ b/contrib/libcbor/CHANGELOG.md @@ -1,40 +1,76 @@ Template: + - [Fix issue X in feature Y](https://github.com/PJK/libcbor/pull/XXX) (by [YYY](https://github.com/YYY)) Next --------------------- +0.13.0 (2025-08-30) +--------------------- + +- [Fix `cbor_is_null`, `cbor_is_undef`, `cbor_is_bool` assertion failing on non-ctrl floats in debug mode](https://github.com/PJK/libcbor/issues/352) (bug discovered by <https://github.com/psturm-swift>) +- [Add an example for handling of CBOR Sequences](https://github.com/PJK/libcbor/pull/358) +- [Use C23/c2x if available](https://github.com/PJK/libcbor/pull/361) + - libcbor remains C99 compatible + - When the compiler does not support new standard, C99 will be used, so the 
change should be backwards compatible +- [Improved introduction documentation and examples](https://github.com/PJK/libcbor/pull/363) +- [Add cbor_copy_definite to turn indefinite items into definite equivalents](https://github.com/PJK/libcbor/pull/364/files) (proposed by Jacob Teplitsky) +- BUILD BREAKING: [Minimum CMake version set to 3.5](https://github.com/PJK/libcbor/pull/355) to [be compatible with CMake 4](https://github.com/eclipse-ecal/ecal/issues/2041) ([suggestion](https://github.com/PJK/libcbor/commit/1183292d4695300785b272532c1e02d68840e4b8#commitcomment-164507943) by <https://github.com/hnyman>) + - See <https://repology.org/project/cmake/versions> for support; the vast majority of users should not be affected. + +0.12.0 (2025-03-16) +--------------------- + +- BUILD BREAKING: [Respect `INTERPROCEDURAL_OPTIMIZATION` and use the default value](https://github.com/PJK/libcbor/issues/315) +- BREAKING: Changes to NaN encoding + - [Fix NaN encoding on Windows](https://github.com/PJK/libcbor/issues/271) + - [Fix NaN encoding on mips/mipsel](https://github.com/PJK/libcbor/issues/329) + - [Signaling NaNs will from now on be encoded as canonical quiet NaNs](https://github.com/PJK/libcbor/pull/335). 
This was already the existing behavior for half-precision floats + - Decoding is unchanged + - Please note that this is an intermediate state and likely to be revisited (<https://github.com/PJK/libcbor/issues/336>) +- [Make build compatible with CMake FetchContent](https://github.com/PJK/libcbor/pull/341) (by [Jan200101](https://github.com/Jan200101)) +- [Support Bzlmod for Bazel builds](https://github.com/PJK/libcbor/pull/340) + - This should significantly simplify including libcbor as a dependency/module in Bazel projects, see <https://bazel.build/external/migration> +- Code quality improvements + - [Fix compiler pragmas](https://github.com/PJK/libcbor/pull/347) (by [brooksdavis](https://github.com/brooksdavis)) + - [Fix code style issues](https://github.com/PJK/libcbor/pull/321) +- [Fixed bug in cbor2cjson example](https://github.com/PJK/libcbor/pull/338) (by [whitehse](https://github.com/whitehse)) + 0.11.0 (2024-02-04) --------------------- + - [Updated documentation to refer to RFC 8949](https://github.com/PJK/libcbor/issues/269) - Improvements to `cbor_describe` - - [Bytestring data will now be printed as well](https://github.com/PJK/libcbor/pull/281) by [akallabeth](https://github.com/akallabeth) + - [Bytestring data will now be printed as well](https://github.com/PJK/libcbor/pull/281) by [akallabeth](https://github.com/akallabeth) - [Formatting consistency and clarity improvements](https://github.com/PJK/libcbor/pull/285) - [Fix `cbor_string_set_handle` not setting the codepoint count](https://github.com/PJK/libcbor/pull/286) - BREAKING: [`cbor_load` will no longer fail on input strings that are well-formed but not valid UTF-8](https://github.com/PJK/libcbor/pull/286) - - If you were relying on the validation, please check the result using `cbor_string_codepoint_count` instead + - If you were relying on the validation, please check the result using `cbor_string_codepoint_count` instead - BREAKING: [All decoders like `cbor_load` and `cbor_stream_decode` 
will accept all well-formed tag values](https://github.com/PJK/libcbor/pull/308) (bug discovered by [dskern-github](https://github.com/dskern-github)) - Previously, decoding of certain values would fail with `CBOR_ERR_MALFORMATED` or `CBOR_DECODER_ERROR` - This also makes decoding symmetrical with serialization, which already accepts all values 0.10.2 (2023-01-31) --------------------- + - [Fixed minor test bug causing failures for x86 Linux](https://github.com/PJK/libcbor/pull/266) (discovered by [trofi](https://github.com/PJK/libcbor/issues/263)) - Actual libcbor functionality not affected, bug was in the test suite - [Made tests platform-independent](https://github.com/PJK/libcbor/pull/272) 0.10.1 (2022-12-30) --------------------- + - [Fix a regression in `cbor_serialize_alloc` that caused serialization of zero-length strings and bytestrings or byte/strings with zero-length chunks to fail](https://github.com/PJK/libcbor/pull/260) (discovered by [martelletto](https://github.com/martelletto)) 0.10.0 (2022-12-29) --------------------- + - Make the buffer_size optional in `cbor_serialize_alloc` [[#205]](https://github.com/PJK/libcbor/pull/205) (by [hughsie](https://github.com/hughsie)) - BREAKING: Improved half-float encoding for denormalized numbers. [[#208]](https://github.com/PJK/libcbor/pull/208) (by [ranvis](https://github.com/ranvis)) - Denormalized half-floats will now preserve data in the mantissa - - Note: Half-float NaNs still lose data (https://github.com/PJK/libcbor/issues/215) + - Note: Half-float NaNs still lose data (<https://github.com/PJK/libcbor/issues/215>) - BUILD BREAKING: Minimum CMake version is 3.0 [[#201]](https://github.com/PJK/libcbor/pull/201) (by [thewtex@](https://github.com/thewtex)) - - See https://repology.org/project/cmake/versions for support; the vast majority of users should not be affected. + - See <https://repology.org/project/cmake/versions> for support; the vast majority of users should not be affected. 
- Fix a potential memory leak when the allocator fails during array or map decoding [[#224]](https://github.com/PJK/libcbor/pull/224) (by [James-ZHANG](https://github.com/James-ZHANG)) - [Fix a memory leak when the allocator fails when adding chunks to indefinite bytestrings.](https://github.com/PJK/libcbor/pull/242) ([discovered](https://github.com/PJK/libcbor/pull/228) by [James-ZHANG](https://github.com/James-ZHANG)) - [Fix a memory leak when the allocator fails when adding chunks to indefinite strings](https://github.com/PJK/libcbor/pull/246) @@ -53,57 +89,62 @@ Next 0.9.0 (2021-11-14) --------------------- + - Improved pkg-config paths handling [[#164]](https://github.com/PJK/libcbor/pull/164) (by [jtojnar@](https://github.com/jtojnar)) - Use explicit math.h linkage [[#170]](https://github.com/PJK/libcbor/pull/170) - BREAKING: Fixed handling of items that exceed the host size_t range [[#186]](https://github.com/PJK/libcbor/pull/186hg) - - Callbacks for bytestrings, strings, arrays, and maps use uint64_t instead of size_t to allow handling of large items that exceed size_t even if size_t < uint64_t - - cbor_decode explicitly checks size to avoid overflows (previously broken, potentially resulting in erroneous decoding on affected systems) - - The change should be a noop for 64b systems + - Callbacks for bytestrings, strings, arrays, and maps use uint64_t instead of size_t to allow handling of large items that exceed size_t even if size_t < uint64_t + - cbor_decode explicitly checks size to avoid overflows (previously broken, potentially resulting in erroneous decoding on affected systems) + - The change should be a noop for 64b systems - Added a [Bazel](https://bazel.build/) build example [[#196]](https://github.com/PJK/libcbor/pull/196) (by [andyjgf@](https://github.com/andyjgf)) 0.8.0 (2020-09-20) --------------------- + - BUILD BREAKING: Use BUILD_SHARED_LIBS to determine how to build libraries (fixed Windows linkage) 
[[#148]](https://github.com/PJK/libcbor/pull/148) (by [intelligide@](https://github.com/intelligide)) - BREAKING: Fix `cbor_tag_item` not increasing the reference count on the tagged item reference it returns [[Fixes #109](https://github.com/PJK/libcbor/issues/109)] (discovered bt [JohnGilmour](https://github.com/JohnGilmour)) - If you have previously relied on the broken behavior, you can use `cbor_move` to emulate as long as the returned handle is an "rvalue" - BREAKING: [`CBOR_DECODER_EBUFFER` removed from `cbor_decoder_status`](https://github.com/PJK/libcbor/pull/156) - - `cbor_stream_decode` will set `CBOR_DECODER_NEDATA` instead if the input buffer is empty + - `cbor_stream_decode` will set `CBOR_DECODER_NEDATA` instead if the input buffer is empty - [Fix `cbor_stream_decode`](https://github.com/PJK/libcbor/pull/156) to set `cbor_decoder_result.required` to the minimum number of input bytes necessary to receive the next callback (as long as at least one byte was passed) (discovered by [woefulwabbit](https://github.com/woefulwabbit)) - Fixed several minor manpage issues [[#159]](https://github.com/PJK/libcbor/pull/159) (discovered by [kloczek@](https://github.com/kloczek)) 0.7.0 (2020-04-25) --------------------- + - Fix bad encoding of NaN half-floats [[Fixes #53]](https://github.com/PJK/libcbor/issues/53) (discovered by [BSipos-RKF](https://github.com/BSipos-RKF)) - - **Warning**: Previous versions encoded NaNs as `0xf9e700` instead of `0xf97e00`; if you rely on the broken behavior, this will be a breaking change + - **Warning**: Previous versions encoded NaNs as `0xf9e700` instead of `0xf97e00`; if you rely on the broken behavior, this will be a breaking change - Fix potentially bad encoding of negative half-float with exponent < -14 [[Fixes #112]](https://github.com/PJK/libcbor/issues/112) (discovered by [yami36](https://github.com/yami36)) - BREAKING: Improved bool support [[Fixes #63]](https://github.com/PJK/libcbor/issues/63) - - Rename 
`cbor_ctrl_is_bool` to `cbor_get_bool` and fix the behavior - - Add `cbor_set_bool` + - Rename `cbor_ctrl_is_bool` to `cbor_get_bool` and fix the behavior + - Add `cbor_set_bool` - Fix memory_allocation_test breaking the build without CBOR_CUSTOM_ALLOC [[Fixes #128]](https://github.com/PJK/libcbor/issues/128) (by [panlinux](https://github.com/panlinux)) - [Fix a potential build issue where cJSON includes may be misconfigured](https://github.com/PJK/libcbor/pull/132) - Breaking: [Add a limit on the size of the decoding context stack](https://github.com/PJK/libcbor/pull/138) (by [James-ZHANG](https://github.com/James-ZHANG)) - - If your usecase requires parsing very deeply nested structures, you might need to increase the default 2k limit via `CBOR_MAX_STACK_SIZE` + - If your usecase requires parsing very deeply nested structures, you might need to increase the default 2k limit via `CBOR_MAX_STACK_SIZE` - Enable LTO/IPO based on [CheckIPOSupported](https://cmake.org/cmake/help/latest/module/CheckIPOSupported.html#module:CheckIPOSupported) [[#143]](https://github.com/PJK/libcbor/pull/143) (by [xanderlent](https://github.com/xanderlent)) - - If you rely on LTO being enabled and use CMake version older than 3.9, you will need to re-enable it manually or upgrade your CMake + - If you rely on LTO being enabled and use CMake version older than 3.9, you will need to re-enable it manually or upgrade your CMake 0.6.1 (2020-03-26) --------------------- + - [Fix bad shared library version number](https://github.com/PJK/libcbor/pull/131) - - **Warning**: Shared library built from the 0.6.0 release is erroneously marked as version "0.6.0", which makes it incompatible with future releases *including the v0.6.X line* even though they may be compatible API/ABI-wise. Refer to the documentation for the new SO versioning scheme. 
+ - **Warning**: Shared library built from the 0.6.0 release is erroneously marked as version "0.6.0", which makes it incompatible with future releases *including the v0.6.X line* even though they may be compatible API/ABI-wise. Refer to the documentation for the new SO versioning scheme. 0.6.0 (2020-03-15) --------------------- -- Correctly set .so version [[Fixes #52]](https://github.com/PJK/libcbor/issues/52). - - **Warning**: All previous releases will be identified as 0.0 by the linker. + +- Correctly set .so version [[Fixes #52]](https://github.com/PJK/libcbor/issues/52). + - **Warning**: All previous releases will be identified as 0.0 by the linker. - Fix & prevent heap overflow error in example code [[#74]](https://github.com/PJK/libcbor/pull/74) [[#76]](https://github.com/PJK/libcbor/pull/76) (by @nevun) - Correctly set OSX dynamic library version [[Fixes #75]](https://github.com/PJK/libcbor/issues/75) - [Fix misplaced 0xFF bytes in maps possibly causing memory corruption](https://github.com/PJK/libcbor/pull/82) - BREAKING: Fix handling & cleanup of failed memory allocation in constructor and builder helper functions [[Fixes #84]](https://github.com/PJK/libcbor/issues/84) - - All cbor_new_* and cbor_build_* functions will now explicitly return NULL when memory allocation fails + - All cbor_new_*and cbor_build_* functions will now explicitly return NULL when memory allocation fails - It is up to the client to handle such cases - Globally enforced code style [[Fixes #83]](https://github.com/PJK/libcbor/issues/83) -- Fix issue possible memory corruption bug on repeated +- Fix issue possible memory corruption bug on repeated cbor_(byte)string_add_chunk calls with intermittently failing realloc calls - Fix possibly misaligned reads and writes when endian.h is uses or when running on a big-endian machine [[Fixes #99](https://github.com/PJK/libcbor/issues/99), [#100](https://github.com/PJK/libcbor/issues/100)] @@ -112,6 +153,7 @@ Next 0.5.0 (2017-02-06) 
--------------------- + - Remove cmocka from the subtree (always rely on system or user-provided version) - Windows CI - Only build tests if explicitly enabled (`-DWITH_TESTS=ON`) @@ -127,6 +169,7 @@ Next 0.4.0 (2015-12-25) --------------------- + Breaks build & header compatibility due to: - Improved build configuration and feature check macros @@ -137,6 +180,7 @@ Breaks build & header compatibility due to: 0.3.1 (2015-05-21) --------------------- + - documentation and comments improvements, mostly for the API reference 0.3.0 (2015-05-21) @@ -152,6 +196,7 @@ Breaks build & header compatibility due to: 0.2.1 (2015-05-17) --------------------- + - C99 support 0.2.0 (2015-05-17) diff --git a/contrib/libcbor/CMakeLists.txt b/contrib/libcbor/CMakeLists.txt index 16b9f0875d33..a7e133a3e888 100644 --- a/contrib/libcbor/CMakeLists.txt +++ b/contrib/libcbor/CMakeLists.txt @@ -1,16 +1,19 @@ -cmake_minimum_required(VERSION 3.0) +cmake_minimum_required(VERSION 3.5) -project(libcbor) -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/") +project(libcbor LANGUAGES C CXX) +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} + "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/") include(CTest) include(GNUInstallDirs) # Provides CMAKE_INSTALL_ variables -SET(CBOR_VERSION_MAJOR "0") -SET(CBOR_VERSION_MINOR "11") -SET(CBOR_VERSION_PATCH "0") -SET(CBOR_VERSION ${CBOR_VERSION_MAJOR}.${CBOR_VERSION_MINOR}.${CBOR_VERSION_PATCH}) +set(CBOR_VERSION_MAJOR "0") +set(CBOR_VERSION_MINOR "13") +set(CBOR_VERSION_PATCH "0") +set(CBOR_VERSION + ${CBOR_VERSION_MAJOR}.${CBOR_VERSION_MINOR}.${CBOR_VERSION_PATCH}) -option(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY "cmake --build --target install does not depend on cmake --build" true) +option(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY + "cmake --build --target install does not depend on cmake --build" true) option(BUILD_SHARED_LIBS "Build as a shared library" false) include(CheckIncludeFiles) @@ -18,42 +21,51 @@ include(CheckIncludeFiles) 
include(TestBigEndian) test_big_endian(BIG_ENDIAN) if(BIG_ENDIAN) - add_definitions(-DIS_BIG_ENDIAN) + add_definitions(-DIS_BIG_ENDIAN) endif() option(CBOR_CUSTOM_ALLOC "Custom, dynamically defined allocator support" OFF) if(CBOR_CUSTOM_ALLOC) - message(WARNING - "CBOR_CUSTOM_ALLOC has been deprecated. Custom allocators are now enabled by default." - "The flag is a no-op and will be removed in the next version. " - "Please remove CBOR_CUSTOM_ALLOC from your build configuration.") -endif(CBOR_CUSTOM_ALLOC) + message( + WARNING + "CBOR_CUSTOM_ALLOC has been deprecated. \ + Custom allocators are now enabled by default. \ + The flag is a no-op and will be removed in the next version. \ + Please remove CBOR_CUSTOM_ALLOC from your build configuration.") +endif() option(CBOR_PRETTY_PRINTER "Include a pretty-printing routine" ON) -set(CBOR_BUFFER_GROWTH "2" CACHE STRING "Factor for buffer growth & shrinking") -set(CBOR_MAX_STACK_SIZE "2048" CACHE STRING "maximum size for decoding context stack") +set(CBOR_BUFFER_GROWTH + "2" + CACHE STRING "Factor for buffer growth & shrinking") +set(CBOR_MAX_STACK_SIZE + "2048" + CACHE STRING "maximum size for decoding context stack") option(WITH_TESTS "[TEST] Build unit tests (requires CMocka)" OFF) if(WITH_TESTS) - add_definitions(-DWITH_TESTS) -endif(WITH_TESTS) + add_definitions(-DWITH_TESTS) +endif() option(WITH_EXAMPLES "Build examples" ON) -option(HUGE_FUZZ "[TEST] Fuzz through 8GB of data in the test. Do not use with memory instrumentation!" OFF) +option(HUGE_FUZZ "[TEST] Fuzz through 8GB of data in the test.\ + Do not use with memory instrumentation!" OFF) if(HUGE_FUZZ) - add_definitions(-DHUGE_FUZZ) -endif(HUGE_FUZZ) + add_definitions(-DHUGE_FUZZ) +endif() -option(SANE_MALLOC "[TEST] Assume that malloc will not allocate multi-GB blocks. 
Tests only, platform specific" OFF) +option(SANE_MALLOC + "[TEST] Assume that malloc will not allocate multi-GB blocks.\ + Tests only, platform specific" OFF) if(SANE_MALLOC) - add_definitions(-DSANE_MALLOC) -endif(SANE_MALLOC) + add_definitions(-DSANE_MALLOC) +endif() option(PRINT_FUZZ "[TEST] Print the fuzzer input" OFF) if(PRINT_FUZZ) - add_definitions(-DPRINT_FUZZ) -endif(PRINT_FUZZ) + add_definitions(-DPRINT_FUZZ) +endif() option(SANITIZE "Enable ASan & a few compatible sanitizers in Debug mode" ON) @@ -67,119 +79,218 @@ set(CPACK_PACKAGE_VERSION_PATCH ${CBOR_VERSION_PATCH}) include(CPack) +# +# Configure compilation flags and language features +# + +include(CheckCSourceCompiles) + +check_c_source_compiles(" + #include <stdio.h> + [[nodiscard]] int f(void) { return 42; } + int main(void) { return f(); } +" HAS_NODISCARD_ATTRIBUTE) + +if (HAS_NODISCARD_ATTRIBUTE) + message(STATUS "[[nodiscard]] is supported.") + add_definitions(-D_CBOR_HAS_NODISCARD_ATTRIBUTE) + # Assume that if we have [[nodiscard]], we have some C23 support. May fail. + if(NOT DEFINED CMAKE_C_STANDARD) + message(STATUS "Switching to C23-like mode. 
To prevent this, pass -DCMAKE_C_STANDARD explicitly.") + # On Clang 16, this is resolved to -std=c2x + set(CMAKE_C_STANDARD 23 CACHE STRING "C language standard") + endif() +endif() + if(MINGW) - # https://github.com/PJK/libcbor/issues/13 - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") + # https://github.com/PJK/libcbor/issues/13 + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") elseif(NOT MSVC) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99 -pedantic") + # Default to C99 + if(NOT DEFINED CMAKE_C_STANDARD) + set(CMAKE_C_STANDARD 99 CACHE STRING "C language standard") + endif() + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic") endif() +# CMAKE_C_STANDARD set above +set(CMAKE_C_STANDARD_REQUIRED ON) +set(CMAKE_C_EXTENSIONS OFF) + if(MSVC) - # This just doesn't work right -- https://msdn.microsoft.com/en-us/library/5ft82fed.aspx - set(CBOR_RESTRICT_SPECIFIER "") + # This just doesn't work right -- + # https://msdn.microsoft.com/en-us/library/5ft82fed.aspx + set(CBOR_RESTRICT_SPECIFIER "") + # Safe stdio is only available in C11 + add_definitions(-D_CRT_SECURE_NO_WARNINGS) + + set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /sdl") else() - set(CBOR_RESTRICT_SPECIFIER "restrict") + set(CBOR_RESTRICT_SPECIFIER "restrict") - set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -Wall -g -ggdb -DDEBUG=true") - set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O3 -Wall -DNDEBUG") + set(CMAKE_C_FLAGS_DEBUG + "${CMAKE_C_FLAGS_DEBUG} -O0 -Wall -Wextra -g -ggdb -DDEBUG=true") + set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O3 -Wall -Wextra -DNDEBUG") - if(SANITIZE) - set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} \ + if(SANITIZE) + set(CMAKE_C_FLAGS_DEBUG + "${CMAKE_C_FLAGS_DEBUG} \ -fsanitize=undefined -fsanitize=address \ -fsanitize=bounds -fsanitize=alignment") - endif() -endif() - -set(CMAKE_EXE_LINKER_FLAGS_DEBUG "-g") + endif() + set(CMAKE_EXE_LINKER_FLAGS_DEBUG "-g") +endif() include(CheckTypeSize) check_type_size("size_t" SIZEOF_SIZE_T) if(SIZEOF_SIZE_T 
LESS 8) - message(WARNING "Your size_t is less than 8 bytes. Decoding of huge items that would exceed the memory address space will always fail. Consider implementing a custom streaming decoder if you need to deal with huge items.") + message( + WARNING + "Your size_t is less than 8 bytes. \ + Decoding of huge items that would exceed the memory address space \ + will always fail. Consider implementing a custom streaming \ + decoder if you need to deal with huge items.") +else() + add_definitions(-DEIGHT_BYTE_SIZE_T) +endif() + +check_c_source_compiles(" + int main() { + __builtin_unreachable(); + return 0; + } +" HAS_BUILTIN_UNREACHABLE) + +if (HAS_BUILTIN_UNREACHABLE) + add_definitions(-D_CBOR_HAS_BUILTIN_UNREACHABLE) +endif() + +# CMake >= 3.9.0 enables LTO for GCC and Clang with INTERPROCEDURAL_OPTIMIZATION +# Policy CMP0069 enables this behavior when we set the minimum CMake version < +# 3.9.0 Checking for LTO support before setting INTERPROCEDURAL_OPTIMIZATION is +# mandatory with CMP0069 set to NEW. +set(LTO_SUPPORTED FALSE) +if(${CMAKE_VERSION} VERSION_GREATER "3.9.0" OR ${CMAKE_VERSION} VERSION_EQUAL + "3.9.0") + cmake_policy(SET CMP0069 NEW) + # Require LTO support to build libcbor with newer CMake versions + include(CheckIPOSupported) + check_ipo_supported(RESULT LTO_SUPPORTED) +endif() + +if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION) + set(CMAKE_INTERPROCEDURAL_OPTIMIZATION ON) +endif() + +if(LTO_SUPPORTED) + message( + STATUS + "LTO is supported and CMAKE_INTERPROCEDURAL_OPTIMIZATION=${CMAKE_INTERPROCEDURAL_OPTIMIZATION}" + ) else() - add_definitions(-DEIGHT_BYTE_SIZE_T) + message(STATUS "LTO is not supported") endif() +# +# Testing and validation +# + enable_testing() set(CTEST_MEMORYCHECK_COMMAND "/usr/bin/valgrind") -set(MEMORYCHECK_COMMAND_OPTIONS "--tool=memcheck --track-origins=yes --leak-check=full --error-exitcode=1") - -add_custom_target(coverage - COMMAND ctest - COMMAND lcov --capture --directory . 
--output-file coverage.info - COMMAND genhtml coverage.info --highlight --legend --output-directory coverage_html - COMMAND echo "Coverage report ready: ${CMAKE_CURRENT_BINARY_DIR}/coverage_html/index.html") - -add_custom_target(llvm-coverage - COMMAND make -j 16 - COMMAND rm -rf coverage_profiles - COMMAND mkdir coverage_profiles - COMMAND bash -c [[ for TEST in $(ls test/*_test); do LLVM_PROFILE_FILE="coverage_profiles/$(basename -- ${TEST}).profraw" ./${TEST}; done ]] - # VERBATIM makes escaping working, but breaks shell expansions, so we need to explicitly use bash - COMMAND bash -c [[ llvm-profdata merge -sparse $(ls coverage_profiles/*.profraw) -o coverage_profiles/combined.profdata ]] - COMMAND bash -c [[ llvm-cov show -instr-profile=coverage_profiles/combined.profdata test/*_test -format=html > coverage_profiles/report.html ]] - COMMAND bash -c [[ llvm-cov report -instr-profile=coverage_profiles/combined.profdata test/*_test ]] - COMMAND echo "Coverage report ready: ${CMAKE_CURRENT_BINARY_DIR}/coverage_profiles/report.html" - VERBATIM) +set(MEMORYCHECK_COMMAND_OPTIONS + "--tool=memcheck --track-origins=yes --leak-check=full --error-exitcode=1") + +add_custom_target( + coverage + COMMAND ctest + COMMAND lcov --capture --directory . 
--output-file coverage.info + COMMAND genhtml coverage.info --highlight --legend --output-directory + coverage_html + COMMAND + echo + "Coverage report ready: ${CMAKE_CURRENT_BINARY_DIR}/coverage_html/index.html" + COMMENT "Generate coverage report using the GNU toolchain" +) + +add_custom_target( + llvm-coverage + COMMAND make -j 16 + COMMAND rm -rf coverage_profiles + COMMAND mkdir coverage_profiles + COMMAND + bash -c + [[ for TEST in $(ls test/*_test); do LLVM_PROFILE_FILE="coverage_profiles/$(basename -- ${TEST}).profraw" ./${TEST}; done ]] + # VERBATIM makes escaping working, but breaks shell expansions, so we need to + # explicitly use bash + COMMAND + bash -c + [[ llvm-profdata merge -sparse $(ls coverage_profiles/*.profraw) -o coverage_profiles/combined.profdata ]] + COMMAND + bash -c + [[ llvm-cov show -instr-profile=coverage_profiles/combined.profdata test/*_test -format=html > coverage_profiles/report.html ]] + COMMAND + bash -c + [[ llvm-cov report -instr-profile=coverage_profiles/combined.profdata test/*_test ]] + COMMAND + echo + "Coverage report ready: ${CMAKE_CURRENT_BINARY_DIR}/coverage_profiles/report.html" + VERBATIM + COMMENT "Generate coverage report using the LLVM toolchain") + +option(COVERAGE "Enable code coverage instrumentation" OFF) +if(COVERAGE) + message("Configuring code coverage instrumentation") + if(CMAKE_C_COMPILER_ID MATCHES "GNU") + # https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html + set(CMAKE_C_FLAGS + "${CMAKE_C_FLAGS} -g -fprofile-arcs -ftest-coverage --coverage") + set(CMAKE_EXE_LINKER_FLAGS_DEBUG + "${CMAKE_EXE_LINKER_FLAGS_DEBUG} -g -fprofile-arcs -ftest-coverage --coverage" + ) + elseif(CMAKE_C_COMPILER_ID MATCHES "Clang") + set(CMAKE_C_FLAGS + "${CMAKE_C_FLAGS} -fprofile-instr-generate -fcoverage-mapping") + set(CMAKE_EXE_LINKER_FLAGS_DEBUG + "${CMAKE_EXE_LINKER_FLAGS_DEBUG} -fprofile-instr-generate") + else() + message( + WARNING + "Code coverage build not implemented for compiler ${CMAKE_C_COMPILER_ID}" + ) + 
 endif() +endif() + +# +# Configure build and targets +# include_directories(src) +# We want to generate configuration.h from the template and make it so that it +# is accessible using the same path during both library build and installed +# header use, without littering the source dir. +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/cbor/configuration.h.in + ${PROJECT_BINARY_DIR}/cbor/configuration.h) +install(FILES ${PROJECT_BINARY_DIR}/cbor/configuration.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/cbor) -option(COVERAGE "Enable code coverage instrumentation" OFF) -if (COVERAGE) - message("Configuring code coverage instrumentation") - if(CMAKE_C_COMPILER_ID MATCHES "GNU") - # https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -fprofile-arcs -ftest-coverage --coverage") - set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} -g -fprofile-arcs -ftest-coverage --coverage") - elseif(CMAKE_C_COMPILER_ID MATCHES "Clang") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-instr-generate -fcoverage-mapping") - set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} -fprofile-instr-generate") - else() - message(WARNING "Code coverage build not implemented for compiler ${CMAKE_C_COMPILER_ID}") - endif() -endif (COVERAGE) - -# We want to generate configuration.h from the template and make it so that it is accessible using the same -# path during both library build and installed header use, without littering the source dir. 
-configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/cbor/configuration.h.in ${PROJECT_BINARY_DIR}/cbor/configuration.h) -install(FILES ${PROJECT_BINARY_DIR}/cbor/configuration.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/cbor) -# Make the header visible at compile time -include_directories(${PROJECT_BINARY_DIR}) +add_subdirectory(src) +if(LTO_SUPPORTED) + set_property(DIRECTORY src PROPERTY INTERPROCEDURAL_OPTIMIZATION ${CMAKE_INTERPROCEDURAL_OPTIMIZATION}) +endif() -# CMake >= 3.9.0 enables LTO for GCC and Clang with INTERPROCEDURAL_OPTIMIZATION -# Policy CMP0069 enables this behavior when we set the minimum CMake version < 3.9.0 -# Checking for LTO support before setting INTERPROCEDURAL_OPTIMIZATION is mandatory with CMP0069 set to NEW. -set(use_lto FALSE) -if(${CMAKE_VERSION} VERSION_GREATER "3.9.0" OR ${CMAKE_VERSION} VERSION_EQUAL "3.9.0") - cmake_policy(SET CMP0069 NEW) - # Require LTO support to build libcbor with newer CMake versions - include(CheckIPOSupported) - check_ipo_supported(RESULT use_lto) -endif(${CMAKE_VERSION} VERSION_GREATER "3.9.0" OR ${CMAKE_VERSION} VERSION_EQUAL "3.9.0") -if(use_lto) - message(STATUS "LTO is enabled") -else() - message(STATUS "LTO is not enabled") -endif(use_lto) +if(WITH_TESTS) + add_subdirectory(test) + if(LTO_SUPPORTED) + set_property(DIRECTORY test PROPERTY INTERPROCEDURAL_OPTIMIZATION ${CMAKE_INTERPROCEDURAL_OPTIMIZATION}) + endif() +endif() -add_subdirectory(src) -if(use_lto) - set_property(DIRECTORY src PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE) -endif(use_lto) - -if (WITH_TESTS) - add_subdirectory(test) - if(use_lto) - set_property(DIRECTORY test PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE) - endif(use_lto) -endif (WITH_TESTS) - -if (WITH_EXAMPLES) - add_subdirectory(examples) - if(use_lto) - set_property(DIRECTORY examples PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE) - endif(use_lto) -endif (WITH_EXAMPLES) +if(WITH_EXAMPLES) + add_subdirectory(examples) + if(LTO_SUPPORTED) + set_property(DIRECTORY examples PROPERTY 
INTERPROCEDURAL_OPTIMIZATION ${CMAKE_INTERPROCEDURAL_OPTIMIZATION}) + endif() +endif() diff --git a/contrib/libcbor/CONTRIBUTING.md b/contrib/libcbor/CONTRIBUTING.md index 9bd42a06b6dc..c0f0da6029ca 100644 --- a/contrib/libcbor/CONTRIBUTING.md +++ b/contrib/libcbor/CONTRIBUTING.md @@ -29,6 +29,23 @@ I work on libcbor on a best effort basis. The typical response time is a few day If you do not receive a response in a few weeks, feel free to ping the PR or issue. +## Coding style + +C++ code: `./clang-format.sh`. + +Function Doxygen strings: +```c +/** An example short description + * + * @param item The item to fooify. Describe any expectations on it (e.g."must + * be a valid array") and reference counting manipulation. + * Multi-line descriptions are OK where necessary. + * @param target_item Short description, always with a full stop. + * @return Has fooification succeeded? List any special cases. + */ + bool foo(cbor_item_t *item, cbor_item_t *target_item); +``` + ## Resources - [Development documentation](https://libcbor.readthedocs.io/en/latest/development.html) diff --git a/contrib/libcbor/Doxyfile b/contrib/libcbor/Doxyfile index 67df45cfbb94..04964e73d5c3 100644 --- a/contrib/libcbor/Doxyfile +++ b/contrib/libcbor/Doxyfile @@ -48,7 +48,7 @@ PROJECT_NAME = libcbor # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 0.11.0 +PROJECT_NUMBER = 0.13.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/contrib/libcbor/MODULE.bazel b/contrib/libcbor/MODULE.bazel new file mode 100644 index 000000000000..fbc208ab2397 --- /dev/null +++ b/contrib/libcbor/MODULE.bazel @@ -0,0 +1 @@ +module(name = "libcbor") diff --git a/contrib/libcbor/README.md b/contrib/libcbor/README.md index ea54bed9437b..62d37b6b957d 100644 --- a/contrib/libcbor/README.md +++ b/contrib/libcbor/README.md @@ -8,16 +8,35 @@ **libcbor** is a C library for parsing and generating [CBOR](https://cbor.io/), the general-purpose schema-less binary data format. ## Main features - - Complete [IETF RFC 8949 (STD 94)](https://www.rfc-editor.org/info/std94) conformance - - Robust platform-independent C99 implementation - - Layered architecture offers both control and convenience - - Flexible memory management - - No shared global state - threading friendly - - Proper handling of UTF-8 - - Full support for streams & incremental processing - - Extensive documentation and test suite - - No runtime dependencies, small footprint - + +- Complete CBOR [IETF RFC 8949 (STD 94)](https://www.rfc-editor.org/info/std94) specification conformance (previously known as [RFC 7049](https://www.rfc-editor.org/info/rfc7049)) +- Supports CBOR Sequences ([RFC 8742](https://datatracker.ietf.org/doc/html/rfc8742)) +- Robust platform-independent C99 implementation, tested on + - Linux, OS X, Windows, BSD + - x86(_64), arm(64), mips(el), riscv64 +- Layered architecture offers both control and convenience +- Flexible memory management +- No shared global state - threading friendly +- Proper handling of UTF-8 +- Full support for streams & incremental processing +- Extensive documentation and test suite +- No runtime dependencies, small footprint + +## References + +libcbor is most prominently used in: + +- Yubico's 
 [libfido2](https://developers.yubico.com/libfido2/) 2FA security key implementation +- Amazon's [AWS C SDK](https://github.com/awslabs/aws-c-common) +- Gnome [fwupd](https://github.com/fwupd/fwupd/blob/main/meson.build#L339) +- Alibaba's [Inclavare librats](https://github.com/inclavare-containers/librats) +- [QEMU](https://wiki.qemu.org/ChangeLog/9.2) +- [ITK](https://docs.itk.org/projects/wasm/en/latest/introduction/parts.html) + +It found its way into many open source and proprietary projects. If you run among others [OpenSSH](https://www.matbra.com/2020/02/17/using-fido2-with-ssh.html), [Microsoft PowerShell](https://github.com/PowerShell/libcbor), [SteamOS](https://github.com/randombk/steamos-teardown/blob/5a37d977fae55d9c41eaf1d07528fa965740bb26/docs/packages.md?plain=1#L461), or [MySQL](https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-34.html) -- you might be indirectly running libcbor too. + +Also, thank you for the shout out in <https://github.com/oz123/awesome-c?tab=readme-ov-file#others>! + ## Getting started ### Compile from source @@ -48,7 +67,15 @@ sudo apt-get install libcbor-dev yum install libcbor-devel ``` -### Others +### Include git repository using CMake + +See e.g. <https://github.com/inclavare-containers/librats/blob/master/cmake/LibCBOR.cmake>. + +## Include git repository using Bazel + +See <https://github.com/PJK/libcbor/tree/master/examples/bazel>. + +### Others <details> <summary>Packaged libcbor is available from 15+ major repositories. Click here for more detail</summary> @@ -89,6 +116,9 @@ int main(void) { ``` ## Documentation + +Crash course: <https://libcbor.readthedocs.io/en/latest/tutorial.html#crash-course> + Get the latest documentation at [libcbor.readthedocs.org](http://libcbor.readthedocs.org/) ## Contributions @@ -98,6 +128,7 @@ Bug reports and contributions are welcome. Please see [CONTRIBUTING.md](https:// Kudos to all the [contributors](https://github.com/PJK/libcbor/graphs/contributors)! 
## License + The MIT License (MIT) Copyright (c) Pavel Kalvoda, 2014-2020 diff --git a/contrib/libcbor/doc/source/api.rst b/contrib/libcbor/doc/source/api.rst index 75b0541cb94e..616f12103124 100644 --- a/contrib/libcbor/doc/source/api.rst +++ b/contrib/libcbor/doc/source/api.rst @@ -30,12 +30,12 @@ The API is designed to allow both very tight control & flexibility and general c api/encoding api/streaming_decoding api/streaming_encoding - api/type_0_1 - api/type_2 - api/type_3 - api/type_4 - api/type_5 - api/type_6 - api/type_7 + api/type_0_1_integers + api/type_2_byte_strings + api/type_3_strings + api/type_4_arrays + api/type_5_maps + api/type_6_tags + api/type_7_floats_ctrls .. [#] http://softwareengineering.vazexqi.com/files/pattern.html diff --git a/contrib/libcbor/doc/source/api/item_reference_counting.rst b/contrib/libcbor/doc/source/api/item_reference_counting.rst index 70075cb67e5b..f590ac2e2292 100644 --- a/contrib/libcbor/doc/source/api/item_reference_counting.rst +++ b/contrib/libcbor/doc/source/api/item_reference_counting.rst @@ -36,3 +36,4 @@ The destruction is synchronous and renders any pointers to items with refcount z .. doxygenfunction:: cbor_refcount .. doxygenfunction:: cbor_move .. doxygenfunction:: cbor_copy +.. doxygenfunction:: cbor_copy_definite diff --git a/contrib/libcbor/doc/source/api/streaming_encoding.rst b/contrib/libcbor/doc/source/api/streaming_encoding.rst index 25100da9b5e0..ebb2f72057af 100644 --- a/contrib/libcbor/doc/source/api/streaming_encoding.rst +++ b/contrib/libcbor/doc/source/api/streaming_encoding.rst @@ -7,7 +7,7 @@ exposes a low-level encoding API to encode CBOR objects on the fly. Unlike strings, etc.) instead of :type:`cbor_item_t`. The client is responsible for constructing the compound types correctly (e.g. terminating arrays). 
-Streaming encoding is typically used to create an streaming (indefinite length) CBOR :doc:`strings <type_2>`, :doc:`byte strings <type_3>`, :doc:`arrays <type_4>`, and :doc:`maps <type_5>`. Complete example: `examples/streaming_array.c <https://github.com/PJK/libcbor/blob/master/examples/streaming_array.c>`_ +Streaming encoding is typically used to create an streaming (indefinite length) CBOR :doc:`strings <type_2_byte_strings>`, :doc:`byte strings <type_3_strings>`, :doc:`arrays <type_4_arrays>`, and :doc:`maps <type_5_maps>`. Complete example: `examples/streaming_array.c <https://github.com/PJK/libcbor/blob/master/examples/streaming_array.c>`_ .. doxygenfunction:: cbor_encode_uint8 diff --git a/contrib/libcbor/doc/source/api/type_0_1.rst b/contrib/libcbor/doc/source/api/type_0_1_integers.rst index 4fc851dd1ed1..4fc851dd1ed1 100644 --- a/contrib/libcbor/doc/source/api/type_0_1.rst +++ b/contrib/libcbor/doc/source/api/type_0_1_integers.rst diff --git a/contrib/libcbor/doc/source/api/type_2.rst b/contrib/libcbor/doc/source/api/type_2_byte_strings.rst index ff9369a90d67..d0644fffb137 100644 --- a/contrib/libcbor/doc/source/api/type_2.rst +++ b/contrib/libcbor/doc/source/api/type_2_byte_strings.rst @@ -1,7 +1,7 @@ Type 2 – Byte strings ============================= -CBOR byte strings are just (ordered) series of bytes without further interpretation (unless there is a :doc:`tag <type_6>`). Byte string's length may or may not be known during encoding. These two kinds of byte strings can be distinguished using :func:`cbor_bytestring_is_definite` and :func:`cbor_bytestring_is_indefinite` respectively. +CBOR byte strings are just (ordered) series of bytes without further interpretation (unless there is a :doc:`tag <type_6_tags>`). Byte string's length may or may not be known during encoding. These two kinds of byte strings can be distinguished using :func:`cbor_bytestring_is_definite` and :func:`cbor_bytestring_is_indefinite` respectively. 
In case a byte string is indefinite, it is encoded as a series of definite byte strings. These are called "chunks". For example, the encoded item diff --git a/contrib/libcbor/doc/source/api/type_3.rst b/contrib/libcbor/doc/source/api/type_3_strings.rst index be06fc176566..847c474b0417 100644 --- a/contrib/libcbor/doc/source/api/type_3.rst +++ b/contrib/libcbor/doc/source/api/type_3_strings.rst @@ -1,7 +1,7 @@ Type 3 – UTF-8 strings ============================= -CBOR strings have the same structure as :doc:`type_2`. +CBOR strings have the same structure as :doc:`type_2_byte_strings`. ================================== ====================================================== Corresponding :type:`cbor_type` ``CBOR_TYPE_STRING`` diff --git a/contrib/libcbor/doc/source/api/type_4.rst b/contrib/libcbor/doc/source/api/type_4_arrays.rst index a76202f4a0fd..2fa615df0eb4 100644 --- a/contrib/libcbor/doc/source/api/type_4.rst +++ b/contrib/libcbor/doc/source/api/type_4_arrays.rst @@ -1,7 +1,7 @@ Type 4 – Arrays ============================= -CBOR arrays, just like :doc:`byte strings <type_2>` and :doc:`strings <type_3>`, can be encoded either as definite, or as indefinite. +CBOR arrays, just like :doc:`byte strings <type_2_byte_strings>` and :doc:`strings <type_3_strings>`, can be encoded either as definite, or as indefinite. Definite arrays have a fixed size which is stored in the header, whereas indefinite arrays do not and are terminated by a special "break" byte instead. Arrays are explicitly created or decoded as definite or indefinite and will be encoded using the corresponding wire representation, regardless of whether the actual size is known at the time of encoding. 
diff --git a/contrib/libcbor/doc/source/api/type_5.rst b/contrib/libcbor/doc/source/api/type_5_maps.rst index 7f7be273aba9..7f7be273aba9 100644 --- a/contrib/libcbor/doc/source/api/type_5.rst +++ b/contrib/libcbor/doc/source/api/type_5_maps.rst diff --git a/contrib/libcbor/doc/source/api/type_6.rst b/contrib/libcbor/doc/source/api/type_6_tags.rst index e98457ceae2a..1ee5f6949a2e 100644 --- a/contrib/libcbor/doc/source/api/type_6.rst +++ b/contrib/libcbor/doc/source/api/type_6_tags.rst @@ -5,7 +5,12 @@ Tag are additional metadata that can be used to extend or specialize the meaning For example, one might tag an array of numbers to communicate that it should be interpreted as a vector. -Please consult the official `IANA repository of CBOR tags <https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml>`_ before inventing new ones. +Please consult the official `IANA repository of CBOR tags <https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml>`_ for known registered values. + +Please note that libcbor does *not* understand the semantics of tags and will +process all `well-formed <https://datatracker.ietf.org/doc/html/rfc8949#name-terminology>`_ +tags regardless of whether they are valid with respect to the data they are +applied to. ================================== ====================================================== Corresponding :type:`cbor_type` ``CBOR_TYPE_TAG`` diff --git a/contrib/libcbor/doc/source/api/type_7.rst b/contrib/libcbor/doc/source/api/type_7_floats_ctrls.rst index b105402a08b2..d893fe37cb7b 100644 --- a/contrib/libcbor/doc/source/api/type_7.rst +++ b/contrib/libcbor/doc/source/api/type_7_floats_ctrls.rst @@ -60,9 +60,16 @@ Manipulating existing items .. doxygenfunction:: cbor_set_float8 -.. _api_type_7_hard_floats: +.. 
_api_type_7_floats_ctrls_half_floats: Half floats ~~~~~~~~~~~~ CBOR supports two `bytes wide ("half-precision") <https://en.wikipedia.org/wiki/Half-precision_floating-point_format>`_ floats which are not supported by the C language. *libcbor* represents them using `float <https://en.cppreference.com/w/c/language/type>` values throughout the API. Encoding will be performed by :func:`cbor_encode_half`, which will handle any values that cannot be represented as a half-float. + +Signaling NaNs +~~~~~~~~~~~~~~~~ + +`Signaling NaNs <https://en.wikipedia.org/wiki/NaN#Signaling_NaN)>`_ are always encoded as a standard, "quiet" NaN. + +The reason for this simplification is that standard C does not offer a way to handle the signaling payload without assumptions about the host architecture. See https://github.com/PJK/libcbor/issues/336 for more context. diff --git a/contrib/libcbor/doc/source/conf.py b/contrib/libcbor/doc/source/conf.py index 0eee7103bb5a..52c19154d04e 100644 --- a/contrib/libcbor/doc/source/conf.py +++ b/contrib/libcbor/doc/source/conf.py @@ -77,8 +77,8 @@ copyright = '2014 - 2020, Pavel Kalvoda' # built documents. # # The short X.Y version. -version = '0.11' -release = '0.11.0' +version = '0.13' +release = '0.13.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/contrib/libcbor/doc/source/development.rst b/contrib/libcbor/doc/source/development.rst index 0b0ac21007ed..9715476a55d6 100644 --- a/contrib/libcbor/doc/source/development.rst +++ b/contrib/libcbor/doc/source/development.rst @@ -99,7 +99,8 @@ Development dependencies - There are some `Ruby <https://www.ruby-lang.org/en/>`_ scripts in ``misc`` - `Valgrind <http://valgrind.org/>`_ (memory correctness & profiling) - `GCOV/LCOV <http://ltp.sourceforge.net/coverage/lcov.php>`_ (test coverage) -- `clang-format` +- `clang-format` (linter) +- `cmakelang <https://cmake-format.readthedocs.io/en/latest/index.html>`_ (linter) Installing *sphinx* @@ -107,13 +108,16 @@ Installing *sphinx* .. code-block:: bash - pip install sphinx - pip install sphinx_rtd_theme - pip install breathe - pip install https://github.com/lepture/python-livereload/archive/master.zip - pip install sphinx-autobuild + pip install -r doc/source/requirements.txt -Further instructions on configuring advanced features can be found at `<http://read-the-docs.readthedocs.org/en/latest/install.html>`_. + +To update the Python dependencies: + +.. code-block:: bash + + pip-compile --upgrade doc/source/requirements.in + +Sphinx reference: `<http://read-the-docs.readthedocs.org/en/latest/install.html>`_. Live preview of docs diff --git a/contrib/libcbor/doc/source/getting_started.rst b/contrib/libcbor/doc/source/getting_started.rst index 98c5a3956337..5f67a86f66c1 100644 --- a/contrib/libcbor/doc/source/getting_started.rst +++ b/contrib/libcbor/doc/source/getting_started.rst @@ -38,31 +38,73 @@ Prerequisites: A handful of configuration flags can be passed to `cmake`. The following table lists libcbor compile-time directives and several important generic flags. 
-======================== ======================================================= ====================== ===================================================================================================================== -Option Meaning Default Possible values ------------------------- ------------------------------------------------------- ---------------------- --------------------------------------------------------------------------------------------------------------------- -``CMAKE_C_COMPILER`` C compiler to use ``cc`` ``gcc``, ``clang``, ``clang-3.5``, ... -``CMAKE_INSTALL_PREFIX`` Installation prefix System-dependent ``/usr/local/lib``, ... -``BUILD_SHARED_LIBS`` Build as a shared library ``OFF`` ``ON``, ``OFF`` -``HUGE_FUZZ`` :doc:`Fuzz test </tests>` with 8GB of data ``OFF`` ``ON``, ``OFF`` -``SANE_MALLOC`` Assume ``malloc`` will refuse unreasonable allocations ``OFF`` ``ON``, ``OFF`` -``COVERAGE`` Generate test coverage instrumentation ``OFF`` ``ON``, ``OFF`` -``WITH_TESTS`` Build unit tests (see :doc:`development`) ``OFF`` ``ON``, ``OFF`` -======================== ======================================================= ====================== ===================================================================================================================== +.. list-table:: + :header-rows: 1 + + * - Option + - Meaning + - Default + - Possible values + * - ``CMAKE_C_COMPILER`` + - C compiler to use + - ``cc`` + - ``gcc``, ``clang``, ``clang-3.5``, ... + * - ``CMAKE_INSTALL_PREFIX`` + - Installation prefix + - System-dependent + - ``/usr/local/lib``, ... 
+ * - ``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` + - Enable LTO (if supported) + - System-dependent + - ``ON``, ``OFF`` + * - ``BUILD_SHARED_LIBS`` + - Build as a shared library + - ``OFF`` + - ``ON``, ``OFF`` + * - ``HUGE_FUZZ`` + - :doc:`Fuzz test </tests>` with 8GB of data + - ``OFF`` + - ``ON``, ``OFF`` + * - ``SANE_MALLOC`` + - Assume ``malloc`` will refuse unreasonable allocations + - ``OFF`` + - ``ON``, ``OFF`` + * - ``COVERAGE`` + - Generate test coverage instrumentation + - ``OFF`` + - ``ON``, ``OFF`` + * - ``WITH_TESTS`` + - Build unit tests (see :doc:`development`) + - ``OFF`` + - ``ON``, ``OFF`` + The following configuration options will also be defined as macros [#]_ in ``<cbor/common.h>`` and can therefore be used in client code: -======================== ======================================================= ====================== ===================================================================================================================== -Option Meaning Default Possible values ------------------------- ------------------------------------------------------- ---------------------- --------------------------------------------------------------------------------------------------------------------- -``CBOR_PRETTY_PRINTER`` Include a pretty-printing routine ``ON`` ``ON``, ``OFF`` -``CBOR_BUFFER_GROWTH`` Factor for buffer growth & shrinking ``2`` Decimals > 1 -======================== ======================================================= ====================== ===================================================================================================================== +.. list-table:: + :header-rows: 1 + + * - Option + - Meaning + - Default + - Possible values + * - ``CBOR_PRETTY_PRINTER`` + - Include a pretty-printing routine + - ``ON`` + - ``ON``, ``OFF`` + * - ``CBOR_BUFFER_GROWTH`` + - Factor for buffer growth & shrinking + - ``2`` + - Decimals > 1 + .. 
[#] ``ON`` & ``OFF`` will be translated to ``1`` and ``0`` using `cmakedefine <https://cmake.org/cmake/help/v3.2/command/configure_file.html?highlight=cmakedefine>`_. If you want to pass other custom configuration options, please refer to `<http://www.cmake.org/Wiki/CMake_Useful_Variables>`_. +.. note:: + When ``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` is enabled, the generated static library (`libcbor.a`) should be used with an LTO-enabled linker downstream. On LLVM toolchains without bitcode embedding (`-fembed-bitcode`), the archive will contain LLVM IR only and linking without LTO `will not work <https://github.com/PJK/libcbor/issues/372>`_. + .. warning:: ``CBOR_CUSTOM_ALLOC`` has been `removed <https://github.com/PJK/libcbor/pull/237>`_. Custom allocators (historically a controlled by a build flag) are always enabled. diff --git a/contrib/libcbor/doc/source/index.rst b/contrib/libcbor/doc/source/index.rst index d3d62cf75c41..06ef1a059891 100644 --- a/contrib/libcbor/doc/source/index.rst +++ b/contrib/libcbor/doc/source/index.rst @@ -3,6 +3,8 @@ libcbor Documentation for version |release|, updated on |today|. +Git repo: https://github.com/PJK/libcbor + Overview -------- *libcbor* is a C library for parsing and generating CBOR_, the general-purpose schema-less binary data format. @@ -28,7 +30,7 @@ Contents .. toctree:: getting_started - using + tutorial api tests standard_conformance diff --git a/contrib/libcbor/doc/source/internal.rst b/contrib/libcbor/doc/source/internal.rst index e30cb11dffa1..07cd7cfc10c6 100644 --- a/contrib/libcbor/doc/source/internal.rst +++ b/contrib/libcbor/doc/source/internal.rst @@ -82,7 +82,7 @@ Generally speaking, data items consist of three parts: .. member:: unsigned char * data - Contains pointer to the actual data. Small, fixed size items (:doc:`api/type_0_1`, :doc:`api/type_6`, :doc:`api/type_7`) are allocated as a single memory block. + Contains pointer to the actual data. 
Small, fixed size items (:doc:`api/type_0_1_integers`, :doc:`api/type_6_tags`, :doc:`api/type_7_floats_ctrls`) are allocated as a single memory block. Consider the following snippet @@ -103,7 +103,7 @@ Generally speaking, data items consist of three parts: | | +--- item +--- item->data - Dynamically sized types (:doc:`api/type_2`, :doc:`api/type_3`, :doc:`api/type_4`, :doc:`api/type_5`) may store handle and data in separate locations. This enables creating large items (e.g :doc:`byte strings <api/type_2>`) without :func:`realloc` or copying large blocks of memory. One simply attaches the correct pointer to the handle. + Dynamically sized types (:doc:`api/type_2_byte_strings`, :doc:`api/type_3_strings`, :doc:`api/type_4_arrays`, :doc:`api/type_5_maps`) may store handle and data in separate locations. This enables creating large items (e.g :doc:`byte strings <api/type_2_byte_strings>`) without :func:`realloc` or copying large blocks of memory. One simply attaches the correct pointer to the handle. .. type:: cbor_item_metadata @@ -112,7 +112,7 @@ Generally speaking, data items consist of three parts: .. member:: struct _cbor_int_metadata int_metadata - Used both by both :doc:`api/type_0_1` + Used both by both :doc:`api/type_0_1_integers` .. member:: struct _cbor_bytestring_metadata bytestring_metadata .. 
member:: struct _cbor_string_metadata string_metadata diff --git a/contrib/libcbor/doc/source/requirements.in b/contrib/libcbor/doc/source/requirements.in new file mode 100644 index 000000000000..000ba6286d17 --- /dev/null +++ b/contrib/libcbor/doc/source/requirements.in @@ -0,0 +1,5 @@ +sphinx +sphinx_rtd_theme +breathe +livereload +sphinx-autobuild diff --git a/contrib/libcbor/doc/source/requirements.txt b/contrib/libcbor/doc/source/requirements.txt index 502d79cc62e6..44b77ded4f36 100644 --- a/contrib/libcbor/doc/source/requirements.txt +++ b/contrib/libcbor/doc/source/requirements.txt @@ -1,31 +1,91 @@ -alabaster==0.7.13 -Babel==2.13.1 -breathe==4.35.0 -certifi==2023.11.17 -charset-normalizer==3.3.2 +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile doc/source/requirements.in +# +alabaster==1.0.0 + # via sphinx +anyio==4.9.0 + # via + # starlette + # watchfiles +babel==2.17.0 + # via sphinx +breathe==4.36.0 + # via -r doc/source/requirements.in +certifi==2025.1.31 + # via requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 + # via uvicorn colorama==0.4.6 -docutils==0.18.1 -idna==3.4 + # via sphinx-autobuild +docutils==0.21.2 + # via + # sphinx + # sphinx-rtd-theme +h11==0.14.0 + # via uvicorn +idna==3.10 + # via + # anyio + # requests imagesize==1.4.1 -importlib-metadata==6.8.0 -Jinja2==3.1.2 -livereload==2.6.3 -MarkupSafe==2.1.3 -packaging==23.2 -Pygments==2.16.1 -pyparsing==3.1.1 -pytz==2021.3 -requests==2.31.0 + # via sphinx +jinja2==3.1.6 + # via sphinx +livereload==2.7.1 + # via -r doc/source/requirements.in +markupsafe==3.0.2 + # via jinja2 +packaging==25.0 + # via sphinx +pygments==2.19.1 + # via sphinx +requests==2.32.3 + # via sphinx +roman-numerals-py==3.1.0 + # via sphinx +sniffio==1.3.1 + # via anyio snowballstemmer==2.2.0 -Sphinx==7.2.6 -sphinx-autobuild==2021.3.14 -sphinx-rtd-theme==1.3.0 -sphinxcontrib-applehelp==1.0.7 -sphinxcontrib-devhelp==1.0.5 
-sphinxcontrib-htmlhelp==2.0.4 + # via sphinx +sphinx==8.2.3 + # via + # -r doc/source/requirements.in + # breathe + # sphinx-autobuild + # sphinx-rtd-theme + # sphinxcontrib-jquery +sphinx-autobuild==2024.10.3 + # via -r doc/source/requirements.in +sphinx-rtd-theme==3.0.2 + # via -r doc/source/requirements.in +sphinxcontrib-applehelp==2.0.0 + # via sphinx +sphinxcontrib-devhelp==2.0.0 + # via sphinx +sphinxcontrib-htmlhelp==2.1.0 + # via sphinx +sphinxcontrib-jquery==4.1 + # via sphinx-rtd-theme sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-qthelp==1.0.6 -sphinxcontrib-serializinghtml==1.1.9 -tornado==6.3.3 -urllib3==2.1.0 -zipp==3.17.0 + # via sphinx +sphinxcontrib-qthelp==2.0.0 + # via sphinx +sphinxcontrib-serializinghtml==2.0.0 + # via sphinx +starlette==0.46.2 + # via sphinx-autobuild +tornado==6.4.2 + # via livereload +urllib3==2.4.0 + # via requests +uvicorn==0.34.2 + # via sphinx-autobuild +watchfiles==1.0.5 + # via sphinx-autobuild +websockets==15.0.1 + # via sphinx-autobuild diff --git a/contrib/libcbor/doc/source/standard_conformance.rst b/contrib/libcbor/doc/source/standard_conformance.rst index 62965f0c4493..4f57bb4c091f 100644 --- a/contrib/libcbor/doc/source/standard_conformance.rst +++ b/contrib/libcbor/doc/source/standard_conformance.rst @@ -13,5 +13,5 @@ There is no explicit limitation of indefinite length byte strings. [#]_ *libcbor --------------------------------- As of C99 and even C11, there is no standard implementation for 2 bytes floats. *libcbor* packs them as a `float <https://en.cppreference.com/w/c/language/type>`. When encoding, *libcbor* selects the appropriate wire representation based on metadata and the actual value. This applies both to canonical and normal mode. -For more information on half-float serialization, please refer to the section on :ref:`api_type_7_hard_floats`. +For more information on half-float serialization, please refer to the section on :ref:`api_type_7_floats_ctrls_half_floats`. 
diff --git a/contrib/libcbor/doc/source/tutorial.rst b/contrib/libcbor/doc/source/tutorial.rst new file mode 100644 index 000000000000..81859ccb2192 --- /dev/null +++ b/contrib/libcbor/doc/source/tutorial.rst @@ -0,0 +1,66 @@ +Tutorial +=========================== + +*libcbor* is a C library to encode, decode, and manipulate CBOR data. It is to CBOR to what `cJSON <https://github.com/DaveGamble/cJSON>`_ is to JSON. We assume you are familiar with the CBOR standard. If not, we recommend `cbor.io <http://cbor.io/>`_. + + +Where to start +-------------- + +- Skim through the Crash course section below. +- Examples of of how to read, write, manipulate, and translate data to and from JSON using *libcbor* are in the `examples directory <https://github.com/PJK/libcbor/tree/master/examples>`_. +- The :doc:`API documentation <api>` is a complete reference of *libcbor*. + + +Crash course +---------------- + +CBOR data objects are ``cbor_item_t`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../../examples/crash_course.c + :language: C + :start-after: // Part 1: Begin + :end-before: // Part 1: End + + +Objects can be serialized and deserialized +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../../examples/crash_course.c + :language: C + :start-after: // Part 2: Begin + :end-before: // Part 2: End + + +Reference counting +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../../examples/crash_course.c + :language: C + :start-after: // Part 3: Begin + :end-before: // Part 3: End + + +Moving intermediate values +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../../examples/crash_course.c + :language: C + :start-after: // Part 4: Begin + :end-before: // Part 4: End + + +Ownership +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
literalinclude:: ../../examples/crash_course.c + :language: C + :start-after: // Part 5: Begin + :end-before: // Part 5: End + + +Streaming IO +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +See https://github.com/PJK/libcbor/blob/master/examples/streaming_array.c, https://github.com/PJK/libcbor/blob/master/examples/streaming_parser.c
\ No newline at end of file diff --git a/contrib/libcbor/doc/source/using.rst b/contrib/libcbor/doc/source/using.rst deleted file mode 100644 index ccb7372f23b6..000000000000 --- a/contrib/libcbor/doc/source/using.rst +++ /dev/null @@ -1,174 +0,0 @@ -Usage & preliminaries -======================= - -Version information --------------------- - -libcbor exports its version using three self-explanatory macros: - - - ``CBOR_MAJOR_VERSION`` - - ``CBOR_MINOR_VERSION`` - - ``CBOR_PATCH_VERSION`` - -The ``CBOR_VERSION`` is a string concatenating these three identifiers into one (e.g. ``0.2.0``). - -In order to simplify version comparisons, the version is also exported as - -.. code-block:: c - - #define CBOR_HEX_VERSION ((CBOR_MAJOR_VERSION << 16) | (CBOR_MINOR_VERSION << 8) | CBOR_PATCH_VERSION) - -Since macros are difficult to work with through FFIs, the same information is also available through three ``uint8_t`` constants, -namely - - - ``cbor_major_version`` - - ``cbor_minor_version`` - - ``cbor_patch_version`` - - -Headers to include ---------------------- - -The ``cbor.h`` header includes all the symbols. 
If, for any reason, you don't want to include all the exported symbols, -feel free to use just some of the ``cbor/*.h`` headers: - - - ``cbor/arrays.h`` - :doc:`api/type_4` - - ``cbor/bytestrings.h`` - :doc:`api/type_2` - - ``cbor/callbacks.h`` - Callbacks used for :doc:`api/streaming_decoding` - - ``cbor/common.h`` - Common utilities - always transitively included - - ``cbor/data.h`` - Data types definitions - always transitively included - - ``cbor/encoding.h`` - Streaming encoders for :doc:`api/streaming_encoding` - - ``cbor/floats_ctrls.h`` - :doc:`api/type_7` - - ``cbor/ints.h`` - :doc:`api/type_0_1` - - ``cbor/maps.h`` - :doc:`api/type_5` - - ``cbor/serialization.h`` - High level serialization such as :func:`cbor_serialize` - - ``cbor/streaming.h`` - Home of :func:`cbor_stream_decode` - - ``cbor/strings.h`` - :doc:`api/type_3` - - ``cbor/tags.h`` - :doc:`api/type_6` - - -Using libcbor --------------- - -If you want to get more familiar with CBOR, we recommend the `cbor.io <http://cbor.io/>`_ website. Once you get the grasp -of what is it CBOR does, the examples (located in the ``examples`` directory) should give you a good feel of the API. The -:doc:`API documentation <api>` should then provide with all the information you may need. - - -**Creating and serializing items** - -.. 
code-block:: c - - #include "cbor.h" - #include <stdio.h> - - int main(int argc, char * argv[]) - { - /* Preallocate the map structure */ - cbor_item_t * root = cbor_new_definite_map(2); - /* Add the content */ - cbor_map_add(root, (struct cbor_pair) { - .key = cbor_move(cbor_build_string("Is CBOR awesome?")), - .value = cbor_move(cbor_build_bool(true)) - }); - cbor_map_add(root, (struct cbor_pair) { - .key = cbor_move(cbor_build_uint8(42)), - .value = cbor_move(cbor_build_string("Is the answer")) - }); - /* Output: `buffer_size` bytes of data in the `buffer` */ - unsigned char * buffer; - size_t buffer_size; - cbor_serialize_alloc(root, &buffer, &buffer_size); - - fwrite(buffer, 1, buffer_size, stdout); - free(buffer); - - fflush(stdout); - cbor_decref(&root); - } - - -**Reading serialized data** - -.. code-block:: c - - #include "cbor.h" - #include <stdio.h> - - /* - * Reads data from a file. Example usage: - * $ ./examples/readfile examples/data/nested_array.cbor - */ - - int main(int argc, char * argv[]) - { - FILE * f = fopen(argv[1], "rb"); - fseek(f, 0, SEEK_END); - size_t length = (size_t)ftell(f); - fseek(f, 0, SEEK_SET); - unsigned char * buffer = malloc(length); - fread(buffer, length, 1, f); - - /* Assuming `buffer` contains `info.st_size` bytes of input data */ - struct cbor_load_result result; - cbor_item_t * item = cbor_load(buffer, length, &result); - /* Pretty-print the result */ - cbor_describe(item, stdout); - fflush(stdout); - /* Deallocate the result */ - cbor_decref(&item); - - fclose(f); - } - - -**Using the streaming parser** - -.. code-block:: c - - #include "cbor.h" - #include <stdio.h> - #include <string.h> - - /* - * Illustrates how one might skim through a map (which is assumed to have - * string keys and values only), looking for the value of a specific key - * - * Use the examples/data/map.cbor input to test this. 
- */ - - const char * key = "a secret key"; - bool key_found = false; - - void find_string(void * _ctx, cbor_data buffer, size_t len) - { - if (key_found) { - printf("Found the value: %*s\n", (int) len, buffer); - key_found = false; - } else if (len == strlen(key)) { - key_found = (memcmp(key, buffer, len) == 0); - } - } - - int main(int argc, char * argv[]) - { - FILE * f = fopen(argv[1], "rb"); - fseek(f, 0, SEEK_END); - size_t length = (size_t)ftell(f); - fseek(f, 0, SEEK_SET); - unsigned char * buffer = malloc(length); - fread(buffer, length, 1, f); - - struct cbor_callbacks callbacks = cbor_empty_callbacks; - struct cbor_decoder_result decode_result; - size_t bytes_read = 0; - callbacks.string = find_string; - while (bytes_read < length) { - decode_result = cbor_stream_decode(buffer + bytes_read, - length - bytes_read, - &callbacks, NULL); - bytes_read += decode_result.read; - } - - fclose(f); - } diff --git a/contrib/libcbor/examples/CMakeLists.txt b/contrib/libcbor/examples/CMakeLists.txt index b1f2ec290e7a..f0d9e9749963 100644 --- a/contrib/libcbor/examples/CMakeLists.txt +++ b/contrib/libcbor/examples/CMakeLists.txt @@ -16,17 +16,22 @@ target_link_libraries(sort cbor) add_executable(hello hello.c) target_link_libraries(hello cbor) +add_executable(cbor_sequence cbor_sequence.c) +target_link_libraries(cbor_sequence cbor) + +add_executable(crash_course crash_course.c) +target_link_libraries(crash_course cbor) + find_package(CJSON) if(CJSON_FOUND) - add_executable(cjson2cbor cjson2cbor.c) - target_include_directories(cjson2cbor PUBLIC ${CJSON_INCLUDE_DIRS}) - target_link_libraries(cjson2cbor cbor ${CJSON_LIBRARY}) + add_executable(cjson2cbor cjson2cbor.c) + target_include_directories(cjson2cbor PUBLIC ${CJSON_INCLUDE_DIRS}) + target_link_libraries(cjson2cbor cbor ${CJSON_LIBRARY}) - add_executable(cbor2cjson cbor2cjson.c) - target_include_directories(cbor2cjson PUBLIC ${CJSON_INCLUDE_DIRS}) - target_link_libraries(cbor2cjson cbor ${CJSON_LIBRARY}) + 
add_executable(cbor2cjson cbor2cjson.c) + target_include_directories(cbor2cjson PUBLIC ${CJSON_INCLUDE_DIRS}) + target_link_libraries(cbor2cjson cbor ${CJSON_LIBRARY}) endif() file(COPY data DESTINATION .) - diff --git a/contrib/libcbor/examples/bazel/MODULE.bazel b/contrib/libcbor/examples/bazel/MODULE.bazel new file mode 100644 index 000000000000..5d14df5cf761 --- /dev/null +++ b/contrib/libcbor/examples/bazel/MODULE.bazel @@ -0,0 +1,13 @@ +module( + name = "libcbor_bazel_example", + version = "0.1.0", +) + +bazel_dep(name = "rules_cc", version = "0.1.1") +bazel_dep(name = "googletest", version = "1.15.2") + +bazel_dep(name = "libcbor") +local_path_override( + module_name = "libcbor", + path = "../..", +) diff --git a/contrib/libcbor/examples/bazel/WORKSPACE b/contrib/libcbor/examples/bazel/WORKSPACE deleted file mode 100644 index c7601f3035ff..000000000000 --- a/contrib/libcbor/examples/bazel/WORKSPACE +++ /dev/null @@ -1,19 +0,0 @@ -workspace(name = "libcbor_bazel_example") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -# Google Test -http_archive( - name = "gtest", - sha256 = "94c634d499558a76fa649edb13721dce6e98fb1e7018dfaeba3cd7a083945e91", - strip_prefix = "googletest-release-1.10.0", - url = "https://github.com/google/googletest/archive/release-1.10.0.zip", -) - -# libcbor -new_local_repository( - name = "libcbor", - build_file = "//third_party:libcbor.BUILD", - path = "../../src", -) - diff --git a/contrib/libcbor/examples/bazel/src/BUILD b/contrib/libcbor/examples/bazel/src/BUILD index d3acb578398a..367094644a5f 100644 --- a/contrib/libcbor/examples/bazel/src/BUILD +++ b/contrib/libcbor/examples/bazel/src/BUILD @@ -2,7 +2,7 @@ load("@rules_cc//cc:defs.bzl", "cc_library") load("@rules_cc//cc:defs.bzl", "cc_binary") cc_library( - name = "src", + name = "hello_lib", srcs = [ "hello.cc", ], @@ -18,7 +18,7 @@ cc_library( ) cc_test( - name = "tests", + name = "hello_test", size = "small", srcs = [ "hello_test.cc", @@ -27,8 +27,8 @@ 
cc_test( "//visibility:private", ], deps = [ - ":src", - "@gtest//:gtest_main", + ":hello_lib", + "@googletest//:gtest_main", "@libcbor//:cbor", ], ) @@ -40,7 +40,7 @@ cc_binary( "main.cc", ], deps = [ - ":src", + ":hello_lib", ], ) diff --git a/contrib/libcbor/examples/bazel/src/hello_test.cc b/contrib/libcbor/examples/bazel/src/hello_test.cc index 68d8633c8772..7aa85e79fc5a 100644 --- a/contrib/libcbor/examples/bazel/src/hello_test.cc +++ b/contrib/libcbor/examples/bazel/src/hello_test.cc @@ -1,10 +1,14 @@ #include "src/hello.h" +#include "cbor.h" #include "gtest/gtest.h" -class HelloTest : public ::testing::Test {}; +class CborTest : public ::testing::Test {}; -TEST_F(HelloTest, CborVersion) { - EXPECT_EQ(cbor_version(), 0); +TEST_F(CborTest, IntegerItem) { + cbor_item_t * answer = cbor_build_uint8(42); + EXPECT_EQ(cbor_get_uint8(answer), 42); + cbor_decref(&answer); + EXPECT_EQ(answer, nullptr); } diff --git a/contrib/libcbor/examples/bazel/third_party/libcbor.BUILD b/contrib/libcbor/examples/bazel/third_party/libcbor.BUILD deleted file mode 100644 index 45f4975b4fc8..000000000000 --- a/contrib/libcbor/examples/bazel/third_party/libcbor.BUILD +++ /dev/null @@ -1,21 +0,0 @@ -cc_library( - name = "cbor", - srcs = glob([ - "src/**/*.h", - "src/**/*.c", - ]), - hdrs = [ - "cbor.h", - ] + glob([ - "cbor/*.h", - ]), - includes = [ - "src", - "src/cbor", - "src/cbor/internal", - ], - visibility = ["//visibility:public"], - deps = [ - "@libcbor_bazel_example//third_party/libcbor:config", - ], -) diff --git a/contrib/libcbor/examples/bazel/third_party/libcbor/cbor/configuration.h b/contrib/libcbor/examples/bazel/third_party/libcbor/cbor/configuration.h index ddf6b9dc5f2b..c0122ff9fcfc 100644 --- a/contrib/libcbor/examples/bazel/third_party/libcbor/cbor/configuration.h +++ b/contrib/libcbor/examples/bazel/third_party/libcbor/cbor/configuration.h @@ -2,7 +2,7 @@ #define LIBCBOR_CONFIGURATION_H #define CBOR_MAJOR_VERSION 0 -#define CBOR_MINOR_VERSION 11 +#define 
CBOR_MINOR_VERSION 13 #define CBOR_PATCH_VERSION 0 #define CBOR_BUFFER_GROWTH 2 diff --git a/contrib/libcbor/examples/cbor2cjson.c b/contrib/libcbor/examples/cbor2cjson.c index 99ddc9ee09e1..df6a5041b9ba 100644 --- a/contrib/libcbor/examples/cbor2cjson.c +++ b/contrib/libcbor/examples/cbor2cjson.c @@ -41,7 +41,8 @@ cJSON* cbor_to_cjson(cbor_item_t* item) { case CBOR_TYPE_ARRAY: { cJSON* result = cJSON_CreateArray(); for (size_t i = 0; i < cbor_array_size(item); i++) { - cJSON_AddItemToArray(result, cbor_to_cjson(cbor_array_get(item, i))); + cJSON_AddItemToArray(result, + cbor_to_cjson(cbor_move(cbor_array_get(item, i)))); } return result; } diff --git a/contrib/libcbor/examples/cbor_sequence.c b/contrib/libcbor/examples/cbor_sequence.c new file mode 100644 index 000000000000..02c43185ff58 --- /dev/null +++ b/contrib/libcbor/examples/cbor_sequence.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2014-2020 Pavel Kalvoda <me@pavelkalvoda.com> + * + * libcbor is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#include <stdio.h> +#include <string.h> + +#include "cbor.h" + +void write_cbor_sequence(const char* filename) { + FILE* file = fopen(filename, "wb"); + if (!file) { + fprintf(stderr, "Error: Could not open file %s for writing\n", filename); + return; + } + + // Create example CBOR items + cbor_item_t* int_item = cbor_build_uint32(42); + cbor_item_t* string_item = cbor_build_string("Hello, CBOR!"); + cbor_item_t* array_item = cbor_new_definite_array(2); + assert(cbor_array_push(array_item, cbor_build_uint8(1))); + assert(cbor_array_push(array_item, cbor_build_uint8(2))); + + // Serialize and write items to the file + unsigned char* buffer; + size_t buffer_size; + + cbor_serialize_alloc(int_item, &buffer, &buffer_size); + fwrite(buffer, 1, buffer_size, file); + free(buffer); + cbor_decref(&int_item); + + cbor_serialize_alloc(string_item, &buffer, &buffer_size); + fwrite(buffer, 1, buffer_size, file); + free(buffer); + cbor_decref(&string_item); + + cbor_serialize_alloc(array_item, &buffer, &buffer_size); + fwrite(buffer, 1, buffer_size, file); + free(buffer); + cbor_decref(&array_item); + + fclose(file); + printf("CBOR sequence written to %s\n", filename); +} + +void read_cbor_sequence(const char* filename) { + FILE* file = fopen(filename, "rb"); + if (!file) { + fprintf(stderr, "Error: Could not open file %s\n", filename); + return; + } + + fseek(file, 0, SEEK_END); + size_t file_size = ftell(file); + fseek(file, 0, SEEK_SET); + + unsigned char* buffer = malloc(file_size); + if (!buffer) { + fprintf(stderr, "Error: Could not allocate memory\n"); + fclose(file); + return; + } + + fread(buffer, 1, file_size, file); + fclose(file); + + struct cbor_load_result result; + size_t offset = 0; + + while (offset < file_size) { + cbor_item_t* item = cbor_load(buffer + offset, file_size - offset, &result); + if (result.error.code != CBOR_ERR_NONE) { + fprintf(stderr, "Error: Failed to parse CBOR item at offset %zu\n", + offset); + break; + } + + cbor_describe(item, 
stdout); + printf("\n"); + + offset += result.read; + cbor_decref(&item); + } + + free(buffer); +} + +int main(int argc, char* argv[]) { + if (argc != 3) { + fprintf(stderr, "Usage: %s <r|w> <file>\n", argv[0]); + return 1; + } + + if (strcmp(argv[1], "w") == 0) { + write_cbor_sequence(argv[2]); + } else if (strcmp(argv[1], "r") == 0) { + read_cbor_sequence(argv[2]); + } else { + fprintf(stderr, + "Error: First argument must be 'r' (read) or 'w' (write)\n"); + return 1; + } + + return 0; +} diff --git a/contrib/libcbor/examples/cjson2cbor.c b/contrib/libcbor/examples/cjson2cbor.c index b67439902442..1182175d684d 100644 --- a/contrib/libcbor/examples/cjson2cbor.c +++ b/contrib/libcbor/examples/cjson2cbor.c @@ -22,10 +22,10 @@ #include "cbor/internal/builder_callbacks.h" #include "cbor/internal/loaders.h" -typedef void (*cbor_load_callback_t)(cJSON *, const struct cbor_callbacks *, - void *); +typedef void (*cbor_load_callback_t)(cJSON*, const struct cbor_callbacks*, + void*); -cbor_item_t *cjson_cbor_load(void *source, +cbor_item_t* cjson_cbor_load(void* source, cbor_load_callback_t cbor_load_callback) { static struct cbor_callbacks callbacks = { .uint64 = &cbor_builder_uint64_callback, @@ -51,9 +51,9 @@ cbor_item_t *cjson_cbor_load(void *source, return context.root; } -void cjson_cbor_stream_decode(cJSON *source, - const struct cbor_callbacks *callbacks, - void *context) { +void cjson_cbor_stream_decode(cJSON* source, + const struct cbor_callbacks* callbacks, + void* context) { switch (source->type) { case cJSON_False: { callbacks->boolean(context, false); @@ -83,13 +83,13 @@ void cjson_cbor_stream_decode(cJSON *source, } case cJSON_String: { // XXX: Assume cJSON handled unicode correctly - callbacks->string(context, (unsigned char *)source->valuestring, + callbacks->string(context, (unsigned char*)source->valuestring, strlen(source->valuestring)); return; } case cJSON_Array: { callbacks->array_start(context, cJSON_GetArraySize(source)); - cJSON *item = 
source->child; + cJSON* item = source->child; while (item != NULL) { cjson_cbor_stream_decode(item, callbacks, context); item = item->next; @@ -98,9 +98,9 @@ void cjson_cbor_stream_decode(cJSON *source, } case cJSON_Object: { callbacks->map_start(context, cJSON_GetArraySize(source)); - cJSON *item = source->child; + cJSON* item = source->child; while (item != NULL) { - callbacks->string(context, (unsigned char *)item->string, + callbacks->string(context, (unsigned char*)item->string, strlen(item->string)); cjson_cbor_stream_decode(item, callbacks, context); item = item->next; @@ -115,24 +115,24 @@ void usage(void) { exit(1); } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc != 2) usage(); - FILE *f = fopen(argv[1], "rb"); + FILE* f = fopen(argv[1], "rb"); if (f == NULL) usage(); /* Read input file into a buffer (cJSON doesn't work with streams) */ fseek(f, 0, SEEK_END); size_t length = (size_t)ftell(f); fseek(f, 0, SEEK_SET); - char *json_buffer = malloc(length + 1); + char* json_buffer = malloc(length + 1); fread(json_buffer, length, 1, f); json_buffer[length] = '\0'; /* Convert between JSON and CBOR */ - cJSON *json = cJSON_Parse(json_buffer); - cbor_item_t *cbor = cjson_cbor_load(json, cjson_cbor_stream_decode); + cJSON* json = cJSON_Parse(json_buffer); + cbor_item_t* cbor = cjson_cbor_load(json, cjson_cbor_stream_decode); /* Print out CBOR bytes */ - unsigned char *buffer; + unsigned char* buffer; size_t buffer_size; cbor_serialize_alloc(cbor, &buffer, &buffer_size); diff --git a/contrib/libcbor/examples/crash_course.c b/contrib/libcbor/examples/crash_course.c new file mode 100644 index 000000000000..4bd9f9379d26 --- /dev/null +++ b/contrib/libcbor/examples/crash_course.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2014-2020 Pavel Kalvoda <me@pavelkalvoda.com> + * + * libcbor is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#include <stdbool.h> +#include <stdio.h> +#include <string.h> +#include "cbor.h" + +// Part 1: Begin +void item_examples() { + // A cbor_item_t can contain any CBOR data type + cbor_item_t* float_item = cbor_build_float4(3.14f); + cbor_item_t* string_item = cbor_build_string("Hello World!"); + cbor_item_t* array_item = cbor_new_indefinite_array(); + + // They can be inspected + assert(cbor_is_float(float_item)); + assert(cbor_typeof(string_item) == CBOR_TYPE_STRING); + assert(cbor_array_is_indefinite(array_item)); + assert(cbor_array_size(array_item) == 0); + + // The data can be accessed + assert(cbor_float_get_float4(float_item) == 3.14f); + assert(memcmp(cbor_string_handle(string_item), "Hello World!", + cbor_string_length(string_item)) == 0); + + // And they can be modified + assert(cbor_array_push(array_item, float_item)); + assert(cbor_array_push(array_item, string_item)); + assert(cbor_array_size(array_item) == 2); + + // At the end of their lifetime, items must be freed + cbor_decref(&float_item); + cbor_decref(&string_item); + cbor_decref(&array_item); +} +// Part 1: End + +// Part 2: Begin +void encode_decode() { + cbor_item_t* item = cbor_build_uint8(42); + + // Serialize the item to a buffer (it will be allocated by libcbor) + unsigned char* buffer; + size_t buffer_size; + cbor_serialize_alloc(item, &buffer, &buffer_size); + assert(buffer_size == 2); + assert(buffer[0] == 0x18); // Encoding byte for uint8 + assert(buffer[1] == 42); // The value itself + + // And deserialize bytes back to an item + struct cbor_load_result result; + cbor_item_t* decoded_item = cbor_load(buffer, buffer_size, &result); + assert(result.error.code == CBOR_ERR_NONE); + assert(cbor_isa_uint(decoded_item)); + assert(cbor_get_uint8(decoded_item) == 42); + + // Free the allocated buffer and items + free(buffer); + cbor_decref(&decoded_item); + cbor_decref(&item); +} +// Part 2: End + +// Part 3: Begin +void reference_counting() { + // cbor_item_t is a reference counted 
pointer under the hood + cbor_item_t* item = cbor_build_uint8(42); + + // Reference count starts at 1 + assert(cbor_refcount(item) == 1); + + // Most operations have reference semantics + cbor_item_t* array_item = cbor_new_definite_array(1); + assert(cbor_array_push(array_item, item)); + assert(cbor_refcount(item) == 2); // item and array_item reference it + cbor_item_t* first_array_element = cbor_array_get(array_item, 0); + assert(first_array_element == item); // same item under the hood + assert(cbor_refcount(item) == + 3); // and now first_array_element also points to it + + // To release the reference, use cbor_decref + cbor_decref(&first_array_element); + + // When reference count reaches 0, the item is freed + assert(cbor_refcount(array_item) == 1); + cbor_decref(&array_item); + assert(array_item == NULL); + assert(cbor_refcount(item) == 1); + + // Be careful, loops leak memory! + + // Deep copy copies the whole item tree + cbor_item_t* item_copy = cbor_copy(item); + assert(cbor_refcount(item) == 1); + assert(cbor_refcount(item_copy) == 1); + assert(item_copy != item); + cbor_decref(&item); + cbor_decref(&item_copy); +} +// Part 3: End + +// Part 4: Begin +void moving_values() { + { + // Move the "42" into an array. + cbor_item_t* array_item = cbor_new_definite_array(1); + // The line below leaks memory! + assert(cbor_array_push(array_item, cbor_build_uint8(42))); + cbor_item_t* first_array_element = cbor_array_get(array_item, 0); + assert(cbor_refcount(first_array_element) == 3); // Should be 2! + cbor_decref(&first_array_element); + cbor_decref(&array_item); + assert(cbor_refcount(first_array_element) == 1); // Shouldn't exist! + // Clean up + cbor_decref(&first_array_element); + } + + { + // A correct way to move values is to decref them in the caller scope. 
+ cbor_item_t* array_item = cbor_new_definite_array(1); + cbor_item_t* item = cbor_build_uint8(42); + assert(cbor_array_push(array_item, item)); + assert(cbor_refcount(item) == 2); + // "Give up" the item + cbor_decref(&item); + cbor_decref(&array_item); + // item is a dangling pointer at this point + } + + { + // cbor_move avoids the need to decref and the dangling pointer + cbor_item_t* array_item = cbor_new_definite_array(1); + assert(cbor_array_push(array_item, cbor_move(cbor_build_uint8(42)))); + cbor_item_t* first_array_element = cbor_array_get(array_item, 0); + assert(cbor_refcount(first_array_element) == 2); + cbor_decref(&first_array_element); + cbor_decref(&array_item); + } +} +// Part 4: End + +// Part 5: Begin +// Refcount can be managed in conjunction with ownership +static cbor_item_t* global_item = NULL; + +// This function takes shared ownership of the item +void borrow_item(cbor_item_t* item) { + global_item = item; + // Mark the extra reference + cbor_incref(item); +} + +void return_item() { + cbor_decref(&global_item); + global_item = NULL; +} + +void reference_ownership() { + cbor_item_t* item = cbor_build_uint8(42); + + // Lend the item + borrow_item(item); + assert(cbor_refcount(item) == 2); + cbor_decref(&item); + + // Release the shared ownership. return_item will deallocate the item. + return_item(); +} +// Part 5: End + +int main(void) { + item_examples(); + encode_decode(); + reference_counting(); + moving_values(); + reference_ownership(); + return 0; +} diff --git a/contrib/libcbor/examples/sort.c b/contrib/libcbor/examples/sort.c index 22f3760b4843..66da12fd246a 100644 --- a/contrib/libcbor/examples/sort.c +++ b/contrib/libcbor/examples/sort.c @@ -14,9 +14,9 @@ * standard library functions. 
*/ -int compareUint(const void *a, const void *b) { - uint8_t av = cbor_get_uint8(*(cbor_item_t **)a), - bv = cbor_get_uint8(*(cbor_item_t **)b); +int compare_uint(const void* a, const void* b) { + uint8_t av = cbor_get_uint8(*(cbor_item_t**)a), + bv = cbor_get_uint8(*(cbor_item_t**)b); if (av < bv) return -1; @@ -27,15 +27,15 @@ int compareUint(const void *a, const void *b) { } int main(void) { - cbor_item_t *array = cbor_new_definite_array(4); + cbor_item_t* array = cbor_new_definite_array(4); bool success = cbor_array_push(array, cbor_move(cbor_build_uint8(4))); success &= cbor_array_push(array, cbor_move(cbor_build_uint8(3))); success &= cbor_array_push(array, cbor_move(cbor_build_uint8(1))); success &= cbor_array_push(array, cbor_move(cbor_build_uint8(2))); if (!success) return 1; - qsort(cbor_array_handle(array), cbor_array_size(array), sizeof(cbor_item_t *), - compareUint); + qsort(cbor_array_handle(array), cbor_array_size(array), sizeof(cbor_item_t*), + compare_uint); cbor_describe(array, stdout); fflush(stdout); diff --git a/contrib/libcbor/examples/streaming_array.c b/contrib/libcbor/examples/streaming_array.c index d165e62b1c9e..f3e7295df0f3 100644 --- a/contrib/libcbor/examples/streaming_array.c +++ b/contrib/libcbor/examples/streaming_array.c @@ -30,7 +30,8 @@ void flush(size_t bytes) { */ int main(int argc, char* argv[]) { if (argc != 2) usage(); - long n = strtol(argv[1], NULL, 10); + size_t n; + scanf(argv[1], "%zu", &n); out = freopen(NULL, "wb", stdout); if (!out) exit(1); diff --git a/contrib/libcbor/examples/streaming_parser.c b/contrib/libcbor/examples/streaming_parser.c index f5eacb4fb7ac..bb760ca8cd7c 100644 --- a/contrib/libcbor/examples/streaming_parser.c +++ b/contrib/libcbor/examples/streaming_parser.c @@ -9,12 +9,6 @@ #include <string.h> #include "cbor.h" -#ifdef __GNUC__ -#define UNUSED(x) __attribute__((__unused__)) x -#else -#define UNUSED(x) x -#endif - void usage(void) { printf("Usage: streaming_parser [input file]\n"); exit(1); @@ 
-30,7 +24,7 @@ void usage(void) { const char* key = "a secret key"; bool key_found = false; -void find_string(void* UNUSED(_ctx), cbor_data buffer, uint64_t len) { +void find_string(void* _ctx _CBOR_UNUSED, cbor_data buffer, uint64_t len) { if (key_found) { printf("Found the value: %.*s\n", (int)len, buffer); key_found = false; diff --git a/contrib/libcbor/misc/asan_suppressions.osx.supp b/contrib/libcbor/misc/asan_suppressions.osx.supp new file mode 100644 index 000000000000..5503d9412a53 --- /dev/null +++ b/contrib/libcbor/misc/asan_suppressions.osx.supp @@ -0,0 +1,3 @@ +leak:initializeNonMetaClass +# via _cmocka_run_group_tests +leak:tlv_get_addr
\ No newline at end of file diff --git a/contrib/libcbor/misc/hooks/pre-commit b/contrib/libcbor/misc/hooks/pre-commit index f3ac9f483799..16d6c37a94d7 100755 --- a/contrib/libcbor/misc/hooks/pre-commit +++ b/contrib/libcbor/misc/hooks/pre-commit @@ -2,10 +2,16 @@ set -e -# Run clang-format and add modified files +# Run cmake-lint, clang-format, and add modified files MODIFIED_UNSTAGED=$(git -C . diff --name-only) MODIFIED_STAGED=$(git -C . diff --name-only --cached --diff-filter=d) +CMAKE_FILES=$(echo "${MODIFIED_STAGED} ${MODIFIED_UNSTAGED}" | grep -oE '(\S*)CMakeLists.txt' | cat) +if ! cmake-lint --line-width 140 ${CMAKE_FILES} > /dev/null; then + echo "cmake-lint failed:" + cmake-lint --line-width 140 ${CMAKE_FILES} +fi + ./clang-format.sh git add ${MODIFIED_STAGED} @@ -15,4 +21,3 @@ if [[ ${MODIFIED_UNSTAGED} != $(git -C . diff --name-only) ]]; then "them" fi - diff --git a/contrib/libcbor/misc/update_version.py b/contrib/libcbor/misc/update_version.py index 475953021cc4..22d175b4bc06 100755 --- a/contrib/libcbor/misc/update_version.py +++ b/contrib/libcbor/misc/update_version.py @@ -35,16 +35,16 @@ replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt', - '''SET\\(CBOR_VERSION_MAJOR "\d+"\\) -SET\\(CBOR_VERSION_MINOR "\d+"\\) -SET\\(CBOR_VERSION_PATCH "\d+"\\)''', - f'''SET(CBOR_VERSION_MAJOR "{major}") -SET(CBOR_VERSION_MINOR "{minor}") -SET(CBOR_VERSION_PATCH "{patch}")''') + r'''set\(CBOR_VERSION_MAJOR "\d+"\) +set\(CBOR_VERSION_MINOR "\d+"\) +set\(CBOR_VERSION_PATCH "\d+"\)''', + f'''set(CBOR_VERSION_MAJOR "{major}") +set(CBOR_VERSION_MINOR "{minor}") +set(CBOR_VERSION_PATCH "{patch}")''') # Update Basel build example replace('examples/bazel/third_party/libcbor/cbor/configuration.h', - '''#define CBOR_MAJOR_VERSION \d+ + r'''#define CBOR_MAJOR_VERSION \d+ #define CBOR_MINOR_VERSION \d+ #define CBOR_PATCH_VERSION \d+''', f'''#define CBOR_MAJOR_VERSION {major} diff --git 
a/contrib/libcbor/oss-fuzz/build.sh b/contrib/libcbor/oss-fuzz/build.sh index e7b1df3ada0a..504937221042 100755 --- a/contrib/libcbor/oss-fuzz/build.sh +++ b/contrib/libcbor/oss-fuzz/build.sh @@ -15,10 +15,14 @@ # ################################################################################ -mkdir build -cd build +mkdir oss_fuzz_build +cd oss_fuzz_build # We disable libcbor's default sanitizers since we'll be configuring them ourselves via CFLAGS. -cmake -D CMAKE_BUILD_TYPE=Debug -D CMAKE_INSTALL_PREFIX="$WORK" -D SANITIZE=OFF .. +cmake -D CMAKE_BUILD_TYPE=Debug \ + -D CMAKE_INSTALL_PREFIX="$WORK" \ + -D SANITIZE=OFF \ + -D CMAKE_INTERPROCEDURAL_OPTIMIZATION=OFF \ + .. make "-j$(nproc)" make install diff --git a/contrib/libcbor/oss-fuzz/cbor_load_fuzzer.cc b/contrib/libcbor/oss-fuzz/cbor_load_fuzzer.cc index 0ab04e5a1374..6c681c344bf3 100644 --- a/contrib/libcbor/oss-fuzz/cbor_load_fuzzer.cc +++ b/contrib/libcbor/oss-fuzz/cbor_load_fuzzer.cc @@ -61,7 +61,7 @@ void *limited_realloc(void *ptr, size_t size) { struct State { FILE* fout; - State() : fout(fopen("/dev/null", "r")) { + State() : fout(fopen("/dev/null", "w")) { cbor_set_allocs(limited_malloc, limited_realloc, limited_free); } }; diff --git a/contrib/libcbor/release.sh b/contrib/libcbor/release.sh index ec56862883eb..790c650de4f0 100755 --- a/contrib/libcbor/release.sh +++ b/contrib/libcbor/release.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Guides my forgetful self through the release process. -# Usage release.sh VERSION +# Usage: release.sh 0.42.0 set -e @@ -17,9 +17,16 @@ function prompt() { DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" OUTDIR=$(mktemp -d) TAG_NAME="v$1" +BRANCH_NAME="release-$1" + +echo ">>>>> Bumping version" cd $DIR +git checkout -b "$BRANCH_NAME" python3 misc/update_version.py "$1" +git commit -a -m "Bump version to $1" +git log -2 +prompt "Check the repository state, everything looks good?" 
echo ">>>>> Checking changelog" grep -A 10 -F "$1" CHANGELOG.md || true @@ -30,7 +37,7 @@ grep PROJECT_NUMBER Doxyfile prompt "Is the Doxyfile version correct?" echo ">>>>> Checking CMakeLists" -grep -A 2 'SET(CBOR_VERSION_MAJOR' CMakeLists.txt +grep -A 2 'set(CBOR_VERSION_MAJOR' CMakeLists.txt prompt "Is the CMake version correct?" echo ">>>>> Checking Bazel build" @@ -64,8 +71,15 @@ make ctest popd +echo ">>>>> Pushing version bump branch" +git push --set-upstream origin $(git rev-parse --abbrev-ref HEAD) +echo "Open and merge PR: https://github.com/PJK/libcbor/pull/new/${BRANCH_NAME}" +prompt "Did you merge the PR?" + +git checkout master +git pull + prompt "Will proceed to tag the release with $TAG_NAME." -git commit -a -m "Release $TAG_NAME" git tag "$TAG_NAME" git push --set-upstream origin $(git rev-parse --abbrev-ref HEAD) git push --tags diff --git a/contrib/libcbor/src/CMakeLists.txt b/contrib/libcbor/src/CMakeLists.txt index e9312395db25..7b0a72bda872 100644 --- a/contrib/libcbor/src/CMakeLists.txt +++ b/contrib/libcbor/src/CMakeLists.txt @@ -1,70 +1,104 @@ -set(SOURCES cbor.c allocators.c cbor/streaming.c cbor/internal/encoders.c cbor/internal/builder_callbacks.c cbor/internal/loaders.c cbor/internal/memory_utils.c cbor/internal/stack.c cbor/internal/unicode.c cbor/encoding.c cbor/serialization.c cbor/arrays.c cbor/common.c cbor/floats_ctrls.c cbor/bytestrings.c cbor/callbacks.c cbor/strings.c cbor/maps.c cbor/tags.c cbor/ints.c) +set(SOURCES + cbor.c + allocators.c + cbor/streaming.c + cbor/internal/encoders.c + cbor/internal/builder_callbacks.c + cbor/internal/loaders.c + cbor/internal/memory_utils.c + cbor/internal/stack.c + cbor/internal/unicode.c + cbor/encoding.c + cbor/serialization.c + cbor/arrays.c + cbor/common.c + cbor/floats_ctrls.c + cbor/bytestrings.c + cbor/callbacks.c + cbor/strings.c + cbor/maps.c + cbor/tags.c + cbor/ints.c) include(JoinPaths) include(CheckFunctionExists) set(CMAKE_SKIP_BUILD_RPATH FALSE) -if (NOT DEFINED 
CMAKE_MACOSX_RPATH) - set(CMAKE_MACOSX_RPATH 0) +if(NOT DEFINED CMAKE_MACOSX_RPATH) + set(CMAKE_MACOSX_RPATH 0) endif() add_library(cbor ${SOURCES}) -target_include_directories(cbor PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) +target_include_directories(cbor PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}> + $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}> + $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>) +set_target_properties(cbor PROPERTIES EXPORT_NAME libcbor) +# For vendored builds +add_library(libcbor::libcbor ALIAS cbor) # Explicitly link math.h if necessary check_function_exists(ldexp LDEXP_AVAILABLE) -if (NOT LDEXP_AVAILABLE) - target_link_libraries(cbor m) +if(NOT LDEXP_AVAILABLE) + target_link_libraries(cbor m) endif() include(GenerateExportHeader) -generate_export_header(cbor EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/cbor/cbor_export.h) -target_include_directories(cbor PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cbor/cbor_export.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/cbor) +generate_export_header(cbor EXPORT_FILE_NAME + ${CMAKE_CURRENT_BINARY_DIR}/cbor/cbor_export.h) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cbor/cbor_export.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/cbor) -if (NOT ${CBOR_VERSION_MAJOR} EQUAL 0) - MESSAGE(FATAL_ERROR "Change the shared library version scheme to reflect https://github.com/PJK/libcbor/issues/52.") +if(NOT ${CBOR_VERSION_MAJOR} EQUAL 0) + message( + FATAL_ERROR + "Change the shared library version scheme to reflect https://github.com/PJK/libcbor/issues/52." 
+ ) endif() -set_target_properties(cbor PROPERTIES - VERSION ${CBOR_VERSION} - MACHO_COMPATIBILITY_VERSION ${CBOR_VERSION_MAJOR}.${CBOR_VERSION_MINOR}.0 - SOVERSION ${CBOR_VERSION_MAJOR}.${CBOR_VERSION_MINOR}) +set_target_properties( + cbor + PROPERTIES VERSION ${CBOR_VERSION} + MACHO_COMPATIBILITY_VERSION + ${CBOR_VERSION_MAJOR}.${CBOR_VERSION_MINOR}.0 + SOVERSION ${CBOR_VERSION_MAJOR}.${CBOR_VERSION_MINOR}) join_paths(libdir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_LIBDIR}") join_paths(includedir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}") configure_file(libcbor.pc.in libcbor.pc @ONLY) # http://www.cmake.org/Wiki/CMake:Install_Commands -install(TARGETS cbor - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) +install( + TARGETS cbor + EXPORT libcborTargets + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) -install(DIRECTORY cbor DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} - FILES_MATCHING PATTERN "*.h") +install( + DIRECTORY cbor + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + FILES_MATCHING + PATTERN "*.h") install(FILES cbor.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/libcbor.pc" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") include(CMakePackageConfigHelpers) configure_package_config_file( - libcborConfig.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/libcborConfig.cmake + libcborConfig.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/libcborConfig.cmake INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libcbor - PATH_VARS CMAKE_INSTALL_INCLUDEDIR -) + PATH_VARS CMAKE_INSTALL_INCLUDEDIR) write_basic_package_version_file( - ${CMAKE_CURRENT_BINARY_DIR}/libcborConfigVersion.cmake - VERSION ${CBOR_VERSION} - COMPATIBILITY SameMajorVersion -) -install( - FILES - ${CMAKE_CURRENT_BINARY_DIR}/libcborConfig.cmake - 
${CMAKE_CURRENT_BINARY_DIR}/libcborConfigVersion.cmake - DESTINATION - ${CMAKE_INSTALL_LIBDIR}/cmake/libcbor - ) + ${CMAKE_CURRENT_BINARY_DIR}/libcborConfigVersion.cmake + VERSION ${CBOR_VERSION} + COMPATIBILITY SameMajorVersion) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libcborConfig.cmake + ${CMAKE_CURRENT_BINARY_DIR}/libcborConfigVersion.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libcbor) +install(EXPORT libcborTargets + NAMESPACE libcbor:: + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libcbor) diff --git a/contrib/libcbor/src/cbor.c b/contrib/libcbor/src/cbor.c index a8b4bcd7a080..93a3709f5423 100644 --- a/contrib/libcbor/src/cbor.c +++ b/contrib/libcbor/src/cbor.c @@ -5,13 +5,15 @@ * it under the terms of the MIT license. See LICENSE for details. */ +#include <stdbool.h> +#include <string.h> + #include "cbor.h" #include "cbor/internal/builder_callbacks.h" #include "cbor/internal/loaders.h" -#pragma clang diagnostic push -cbor_item_t *cbor_load(cbor_data source, size_t source_size, - struct cbor_load_result *result) { +cbor_item_t* cbor_load(cbor_data source, size_t source_size, + struct cbor_load_result* result) { /* Context stack */ static struct cbor_callbacks callbacks = { .uint8 = &cbor_builder_uint8_callback, @@ -115,8 +117,11 @@ error: return NULL; } -static cbor_item_t *_cbor_copy_int(cbor_item_t *item, bool negative) { - cbor_item_t *res; +static cbor_item_t* _cbor_copy_int(cbor_item_t* item, bool negative) { + CBOR_ASSERT(cbor_isa_uint(item) || cbor_isa_negint(item)); + CBOR_ASSERT(cbor_int_get_width(item) >= CBOR_INT_8 && + cbor_int_get_width(item) <= CBOR_INT_64); + cbor_item_t* res = NULL; switch (cbor_int_get_width(item)) { case CBOR_INT_8: res = cbor_build_uint8(cbor_get_uint8(item)); @@ -137,8 +142,10 @@ static cbor_item_t *_cbor_copy_int(cbor_item_t *item, bool negative) { return res; } -static cbor_item_t *_cbor_copy_float_ctrl(cbor_item_t *item) { - // cppcheck-suppress missingReturn +static cbor_item_t* 
_cbor_copy_float_ctrl(cbor_item_t* item) { + CBOR_ASSERT(cbor_isa_float_ctrl(item)); + CBOR_ASSERT(cbor_float_get_width(item) >= CBOR_FLOAT_0 && + cbor_float_get_width(item) <= CBOR_FLOAT_64); switch (cbor_float_get_width(item)) { case CBOR_FLOAT_0: return cbor_build_ctrl(cbor_ctrl_value(item)); @@ -148,11 +155,14 @@ static cbor_item_t *_cbor_copy_float_ctrl(cbor_item_t *item) { return cbor_build_float4(cbor_float_get_float4(item)); case CBOR_FLOAT_64: return cbor_build_float8(cbor_float_get_float8(item)); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return NULL; // LCOV_EXCL_START } } -cbor_item_t *cbor_copy(cbor_item_t *item) { - // cppcheck-suppress missingReturn +cbor_item_t* cbor_copy(cbor_item_t* item) { + CBOR_ASSERT_VALID_TYPE(cbor_typeof(item)); switch (cbor_typeof(item)) { case CBOR_TYPE_UINT: return _cbor_copy_int(item, false); @@ -163,13 +173,13 @@ cbor_item_t *cbor_copy(cbor_item_t *item) { return cbor_build_bytestring(cbor_bytestring_handle(item), cbor_bytestring_length(item)); } else { - cbor_item_t *res = cbor_new_indefinite_bytestring(); + cbor_item_t* res = cbor_new_indefinite_bytestring(); if (res == NULL) { return NULL; } for (size_t i = 0; i < cbor_bytestring_chunk_count(item); i++) { - cbor_item_t *chunk_copy = + cbor_item_t* chunk_copy = cbor_copy(cbor_bytestring_chunks_handle(item)[i]); if (chunk_copy == NULL) { cbor_decref(&res); @@ -186,16 +196,16 @@ cbor_item_t *cbor_copy(cbor_item_t *item) { } case CBOR_TYPE_STRING: if (cbor_string_is_definite(item)) { - return cbor_build_stringn((const char *)cbor_string_handle(item), + return cbor_build_stringn((const char*)cbor_string_handle(item), cbor_string_length(item)); } else { - cbor_item_t *res = cbor_new_indefinite_string(); + cbor_item_t* res = cbor_new_indefinite_string(); if (res == NULL) { return NULL; } for (size_t i = 0; i < cbor_string_chunk_count(item); i++) { - cbor_item_t *chunk_copy = + cbor_item_t* chunk_copy = cbor_copy(cbor_string_chunks_handle(item)[i]); if (chunk_copy 
== NULL) { cbor_decref(&res); @@ -211,7 +221,7 @@ cbor_item_t *cbor_copy(cbor_item_t *item) { return res; } case CBOR_TYPE_ARRAY: { - cbor_item_t *res; + cbor_item_t* res; if (cbor_array_is_definite(item)) { res = cbor_new_definite_array(cbor_array_size(item)); } else { @@ -222,7 +232,7 @@ cbor_item_t *cbor_copy(cbor_item_t *item) { } for (size_t i = 0; i < cbor_array_size(item); i++) { - cbor_item_t *entry_copy = cbor_copy(cbor_move(cbor_array_get(item, i))); + cbor_item_t* entry_copy = cbor_copy(cbor_move(cbor_array_get(item, i))); if (entry_copy == NULL) { cbor_decref(&res); return NULL; @@ -237,7 +247,7 @@ cbor_item_t *cbor_copy(cbor_item_t *item) { return res; } case CBOR_TYPE_MAP: { - cbor_item_t *res; + cbor_item_t* res; if (cbor_map_is_definite(item)) { res = cbor_new_definite_map(cbor_map_size(item)); } else { @@ -247,14 +257,14 @@ cbor_item_t *cbor_copy(cbor_item_t *item) { return NULL; } - struct cbor_pair *it = cbor_map_handle(item); + struct cbor_pair* it = cbor_map_handle(item); for (size_t i = 0; i < cbor_map_size(item); i++) { - cbor_item_t *key_copy = cbor_copy(it[i].key); + cbor_item_t* key_copy = cbor_copy(it[i].key); if (key_copy == NULL) { cbor_decref(&res); return NULL; } - cbor_item_t *value_copy = cbor_copy(it[i].value); + cbor_item_t* value_copy = cbor_copy(it[i].value); if (value_copy == NULL) { cbor_decref(&res); cbor_decref(&key_copy); @@ -273,16 +283,148 @@ cbor_item_t *cbor_copy(cbor_item_t *item) { return res; } case CBOR_TYPE_TAG: { - cbor_item_t *item_copy = cbor_copy(cbor_move(cbor_tag_item(item))); + cbor_item_t* item_copy = cbor_copy(cbor_move(cbor_tag_item(item))); if (item_copy == NULL) { return NULL; } - cbor_item_t *tag = cbor_build_tag(cbor_tag_value(item), item_copy); + cbor_item_t* tag = cbor_build_tag(cbor_tag_value(item), item_copy); cbor_decref(&item_copy); return tag; } case CBOR_TYPE_FLOAT_CTRL: return _cbor_copy_float_ctrl(item); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return NULL; // LCOV_EXCL_STOP + } 
+} + +cbor_item_t* cbor_copy_definite(cbor_item_t* item) { + CBOR_ASSERT_VALID_TYPE(cbor_typeof(item)); + switch (cbor_typeof(item)) { + case CBOR_TYPE_UINT: + case CBOR_TYPE_NEGINT: + return cbor_copy(item); + case CBOR_TYPE_BYTESTRING: + if (cbor_bytestring_is_definite(item)) { + return cbor_copy(item); + } else { + size_t total_length = 0; + for (size_t i = 0; i < cbor_bytestring_chunk_count(item); i++) { + total_length += + cbor_bytestring_length(cbor_bytestring_chunks_handle(item)[i]); + } + + unsigned char* combined_data = _cbor_malloc(total_length); + if (combined_data == NULL) { + return NULL; + } + + size_t offset = 0; + for (size_t i = 0; i < cbor_bytestring_chunk_count(item); i++) { + cbor_item_t* chunk = cbor_bytestring_chunks_handle(item)[i]; + memcpy(combined_data + offset, cbor_bytestring_handle(chunk), + cbor_bytestring_length(chunk)); + offset += cbor_bytestring_length(chunk); + } + + cbor_item_t* res = cbor_new_definite_bytestring(); + cbor_bytestring_set_handle(res, combined_data, total_length); + return res; + } + case CBOR_TYPE_STRING: + if (cbor_string_is_definite(item)) { + return cbor_copy(item); + } else { + size_t total_length = 0; + for (size_t i = 0; i < cbor_string_chunk_count(item); i++) { + total_length += + cbor_string_length(cbor_string_chunks_handle(item)[i]); + } + + unsigned char* combined_data = _cbor_malloc(total_length); + if (combined_data == NULL) { + return NULL; + } + + size_t offset = 0; + for (size_t i = 0; i < cbor_string_chunk_count(item); i++) { + cbor_item_t* chunk = cbor_string_chunks_handle(item)[i]; + memcpy(combined_data + offset, cbor_string_handle(chunk), + cbor_string_length(chunk)); + offset += cbor_string_length(chunk); + } + + cbor_item_t* res = cbor_new_definite_string(); + cbor_string_set_handle(res, combined_data, total_length); + return res; + } + case CBOR_TYPE_ARRAY: { + cbor_item_t* res = cbor_new_definite_array(cbor_array_size(item)); + if (res == NULL) { + return NULL; + } + + for (size_t i = 0; i 
< cbor_array_size(item); i++) { + cbor_item_t* entry_copy = + cbor_copy_definite(cbor_array_handle(item)[i]); + if (entry_copy == NULL) { + cbor_decref(&res); + return NULL; + } + // Cannot fail since we have a definite array preallocated + // cppcheck-suppress syntaxError + const bool item_pushed _CBOR_UNUSED = cbor_array_push(res, entry_copy); + CBOR_ASSERT(item_pushed); + cbor_decref(&entry_copy); + } + return res; + } + case CBOR_TYPE_MAP: { + cbor_item_t* res; + res = cbor_new_definite_map(cbor_map_size(item)); + if (res == NULL) { + return NULL; + } + + struct cbor_pair* it = cbor_map_handle(item); + for (size_t i = 0; i < cbor_map_size(item); i++) { + cbor_item_t* key_copy = cbor_copy_definite(it[i].key); + if (key_copy == NULL) { + cbor_decref(&res); + return NULL; + } + cbor_item_t* value_copy = cbor_copy_definite(it[i].value); + if (value_copy == NULL) { + cbor_decref(&res); + cbor_decref(&key_copy); + return NULL; + } + // Cannot fail since we have a definite map preallocated + // cppcheck-suppress syntaxError + const bool item_added _CBOR_UNUSED = cbor_map_add( + res, (struct cbor_pair){.key = key_copy, .value = value_copy}); + CBOR_ASSERT(item_added); + cbor_decref(&key_copy); + cbor_decref(&value_copy); + } + return res; + } + case CBOR_TYPE_TAG: { + cbor_item_t* item_copy = + cbor_copy_definite(cbor_move(cbor_tag_item(item))); + if (item_copy == NULL) { + return NULL; + } + cbor_item_t* tag = cbor_build_tag(cbor_tag_value(item), item_copy); + cbor_decref(&item_copy); + return tag; + } + case CBOR_TYPE_FLOAT_CTRL: + return cbor_copy(item); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return NULL; // LCOV_EXCL_STOP } } @@ -301,11 +443,13 @@ static int _pow(int b, int ex) { return res; } -static void _cbor_type_marquee(FILE *out, char *label, int indent) { +static void _cbor_type_marquee(FILE* out, char* label, int indent) { fprintf(out, "%*.*s[%s] ", indent, indent, " ", label); } -static void _cbor_nested_describe(cbor_item_t *item, FILE *out, 
int indent) { +static void _cbor_nested_describe(cbor_item_t* item, FILE* out, int indent) { + CBOR_ASSERT(cbor_typeof(item) >= CBOR_TYPE_UINT && + cbor_typeof(item) <= CBOR_TYPE_FLOAT_CTRL); const int indent_offset = 4; switch (cbor_typeof(item)) { case CBOR_TYPE_UINT: { @@ -329,7 +473,7 @@ static void _cbor_nested_describe(cbor_item_t *item, FILE *out, int indent) { _cbor_nested_describe(cbor_bytestring_chunks_handle(item)[i], out, indent + indent_offset); } else { - const unsigned char *data = cbor_bytestring_handle(item); + const unsigned char* data = cbor_bytestring_handle(item); fprintf(out, "Definite, Length: %zuB, Data:\n", cbor_bytestring_length(item)); fprintf(out, "%*s", indent + indent_offset, " "); @@ -418,7 +562,7 @@ static void _cbor_nested_describe(cbor_item_t *item, FILE *out, int indent) { } } -void cbor_describe(cbor_item_t *item, FILE *out) { +void cbor_describe(cbor_item_t* item, FILE* out) { _cbor_nested_describe(item, out, 0); } diff --git a/contrib/libcbor/src/cbor.h b/contrib/libcbor/src/cbor.h index 46ef8f267ac9..b2bb9771d13e 100644 --- a/contrib/libcbor/src/cbor.h +++ b/contrib/libcbor/src/cbor.h @@ -61,6 +61,17 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_load( */ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_copy(cbor_item_t* item); +/** Copy the item with all items converted to definite length equivalents + * + * Deep copy semantics follow #cbor_copy + * + * @param item item to copy + * @return Reference to the new item. The item's reference count is initialized + * to one. + * @return `NULL` if memory allocation fails + */ +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_copy_definite(cbor_item_t* item); + #if CBOR_PRETTY_PRINTER #include <stdio.h> diff --git a/contrib/libcbor/src/cbor/arrays.c b/contrib/libcbor/src/cbor/arrays.c index a23bbe3cd152..5433037cfdb8 100644 --- a/contrib/libcbor/src/cbor/arrays.c +++ b/contrib/libcbor/src/cbor/arrays.c @@ -5,25 +5,26 @@ * it under the terms of the MIT license. See LICENSE for details. 
*/ +#include <stdbool.h> + #include "arrays.h" -#include <string.h> #include "internal/memory_utils.h" -size_t cbor_array_size(const cbor_item_t *item) { +size_t cbor_array_size(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_array(item)); return item->metadata.array_metadata.end_ptr; } -size_t cbor_array_allocated(const cbor_item_t *item) { +size_t cbor_array_allocated(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_array(item)); return item->metadata.array_metadata.allocated; } -cbor_item_t *cbor_array_get(const cbor_item_t *item, size_t index) { - return cbor_incref(((cbor_item_t **)item->data)[index]); +cbor_item_t* cbor_array_get(const cbor_item_t* item, size_t index) { + return cbor_incref(((cbor_item_t**)item->data)[index]); } -bool cbor_array_set(cbor_item_t *item, size_t index, cbor_item_t *value) { +bool cbor_array_set(cbor_item_t* item, size_t index, cbor_item_t* value) { if (index == item->metadata.array_metadata.end_ptr) { return cbor_array_push(item, value); } else if (index < item->metadata.array_metadata.end_ptr) { @@ -33,19 +34,19 @@ bool cbor_array_set(cbor_item_t *item, size_t index, cbor_item_t *value) { } } -bool cbor_array_replace(cbor_item_t *item, size_t index, cbor_item_t *value) { +bool cbor_array_replace(cbor_item_t* item, size_t index, cbor_item_t* value) { if (index >= item->metadata.array_metadata.end_ptr) return false; /* We cannot use cbor_array_get as that would increase the refcount */ - cbor_intermediate_decref(((cbor_item_t **)item->data)[index]); - ((cbor_item_t **)item->data)[index] = cbor_incref(value); + cbor_intermediate_decref(((cbor_item_t**)item->data)[index]); + ((cbor_item_t**)item->data)[index] = cbor_incref(value); return true; } -bool cbor_array_push(cbor_item_t *array, cbor_item_t *pushee) { +bool cbor_array_push(cbor_item_t* array, cbor_item_t* pushee) { CBOR_ASSERT(cbor_isa_array(array)); - struct _cbor_array_metadata *metadata = - (struct _cbor_array_metadata *)&array->metadata; - cbor_item_t **data = 
(cbor_item_t **)array->data; + struct _cbor_array_metadata* metadata = + (struct _cbor_array_metadata*)&array->metadata; + cbor_item_t** data = (cbor_item_t**)array->data; if (cbor_array_is_definite(array)) { /* Do not reallocate definite arrays */ if (metadata->end_ptr >= metadata->allocated) { @@ -64,8 +65,8 @@ bool cbor_array_push(cbor_item_t *array, cbor_item_t *pushee) { ? 1 : CBOR_BUFFER_GROWTH * metadata->allocated; - unsigned char *new_data = _cbor_realloc_multiple( - array->data, sizeof(cbor_item_t *), new_allocation); + unsigned char* new_data = _cbor_realloc_multiple( + array->data, sizeof(cbor_item_t*), new_allocation); if (new_data == NULL) { return false; } @@ -73,31 +74,31 @@ bool cbor_array_push(cbor_item_t *array, cbor_item_t *pushee) { array->data = new_data; metadata->allocated = new_allocation; } - ((cbor_item_t **)array->data)[metadata->end_ptr++] = pushee; + ((cbor_item_t**)array->data)[metadata->end_ptr++] = pushee; } cbor_incref(pushee); return true; } -bool cbor_array_is_definite(const cbor_item_t *item) { +bool cbor_array_is_definite(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_array(item)); return item->metadata.array_metadata.type == _CBOR_METADATA_DEFINITE; } -bool cbor_array_is_indefinite(const cbor_item_t *item) { +bool cbor_array_is_indefinite(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_array(item)); return item->metadata.array_metadata.type == _CBOR_METADATA_INDEFINITE; } -cbor_item_t **cbor_array_handle(const cbor_item_t *item) { +cbor_item_t** cbor_array_handle(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_array(item)); - return (cbor_item_t **)item->data; + return (cbor_item_t**)item->data; } -cbor_item_t *cbor_new_definite_array(size_t size) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_definite_array(size_t size) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); - cbor_item_t **data = _cbor_alloc_multiple(sizeof(cbor_item_t *), size); + cbor_item_t** 
data = _cbor_alloc_multiple(sizeof(cbor_item_t*), size); _CBOR_DEPENDENT_NOTNULL(item, data); for (size_t i = 0; i < size; i++) { @@ -110,13 +111,13 @@ cbor_item_t *cbor_new_definite_array(size_t size) { .metadata = {.array_metadata = {.type = _CBOR_METADATA_DEFINITE, .allocated = size, .end_ptr = 0}}, - .data = (unsigned char *)data}; + .data = (unsigned char*)data}; return item; } -cbor_item_t *cbor_new_indefinite_array(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_indefinite_array(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ diff --git a/contrib/libcbor/src/cbor/arrays.h b/contrib/libcbor/src/cbor/arrays.h index db19e59d0624..891061ef765a 100644 --- a/contrib/libcbor/src/cbor/arrays.h +++ b/contrib/libcbor/src/cbor/arrays.h @@ -49,7 +49,8 @@ CBOR_EXPORT cbor_item_t* cbor_array_get(const cbor_item_t* item, size_t index); * returned. Creating arrays with holes is not possible. * * @param item An array - * @param value The item to assign + * @param value The item to assign. Its reference count will be increased by + * one. * @param index The index (zero-based) * @return `true` on success, `false` on allocation failure. 
*/ diff --git a/contrib/libcbor/src/cbor/bytestrings.c b/contrib/libcbor/src/cbor/bytestrings.c index 528937179aee..f8290a56328a 100644 --- a/contrib/libcbor/src/cbor/bytestrings.c +++ b/contrib/libcbor/src/cbor/bytestrings.c @@ -9,27 +9,27 @@ #include <string.h> #include "internal/memory_utils.h" -size_t cbor_bytestring_length(const cbor_item_t *item) { +size_t cbor_bytestring_length(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_bytestring(item)); return item->metadata.bytestring_metadata.length; } -unsigned char *cbor_bytestring_handle(const cbor_item_t *item) { +unsigned char* cbor_bytestring_handle(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_bytestring(item)); return item->data; } -bool cbor_bytestring_is_definite(const cbor_item_t *item) { +bool cbor_bytestring_is_definite(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_bytestring(item)); return item->metadata.bytestring_metadata.type == _CBOR_METADATA_DEFINITE; } -bool cbor_bytestring_is_indefinite(const cbor_item_t *item) { +bool cbor_bytestring_is_indefinite(const cbor_item_t* item) { return !cbor_bytestring_is_definite(item); } -cbor_item_t *cbor_new_definite_bytestring(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_definite_bytestring(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ .refcount = 1, @@ -39,8 +39,8 @@ cbor_item_t *cbor_new_definite_bytestring(void) { return item; } -cbor_item_t *cbor_new_indefinite_bytestring(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_indefinite_bytestring(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ .refcount = 1, @@ -49,7 +49,7 @@ cbor_item_t *cbor_new_indefinite_bytestring(void) { .length = 0}}, .data = _cbor_malloc(sizeof(struct cbor_indefinite_string_data))}; _CBOR_DEPENDENT_NOTNULL(item, item->data); - *((struct cbor_indefinite_string_data *)item->data) = + 
*((struct cbor_indefinite_string_data*)item->data) = (struct cbor_indefinite_string_data){ .chunk_count = 0, .chunk_capacity = 0, @@ -58,17 +58,17 @@ cbor_item_t *cbor_new_indefinite_bytestring(void) { return item; } -cbor_item_t *cbor_build_bytestring(cbor_data handle, size_t length) { - cbor_item_t *item = cbor_new_definite_bytestring(); +cbor_item_t* cbor_build_bytestring(cbor_data handle, size_t length) { + cbor_item_t* item = cbor_new_definite_bytestring(); _CBOR_NOTNULL(item); - void *content = _cbor_malloc(length); + void* content = _cbor_malloc(length); _CBOR_DEPENDENT_NOTNULL(item, content); memcpy(content, handle, length); cbor_bytestring_set_handle(item, content, length); return item; } -void cbor_bytestring_set_handle(cbor_item_t *item, +void cbor_bytestring_set_handle(cbor_item_t* item, cbor_mutable_data CBOR_RESTRICT_POINTER data, size_t length) { CBOR_ASSERT(cbor_isa_bytestring(item)); @@ -77,25 +77,25 @@ void cbor_bytestring_set_handle(cbor_item_t *item, item->metadata.bytestring_metadata.length = length; } -cbor_item_t **cbor_bytestring_chunks_handle(const cbor_item_t *item) { +cbor_item_t** cbor_bytestring_chunks_handle(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_bytestring(item)); CBOR_ASSERT(cbor_bytestring_is_indefinite(item)); - return ((struct cbor_indefinite_string_data *)item->data)->chunks; + return ((struct cbor_indefinite_string_data*)item->data)->chunks; } -size_t cbor_bytestring_chunk_count(const cbor_item_t *item) { +size_t cbor_bytestring_chunk_count(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_bytestring(item)); CBOR_ASSERT(cbor_bytestring_is_indefinite(item)); - return ((struct cbor_indefinite_string_data *)item->data)->chunk_count; + return ((struct cbor_indefinite_string_data*)item->data)->chunk_count; } -bool cbor_bytestring_add_chunk(cbor_item_t *item, cbor_item_t *chunk) { +bool cbor_bytestring_add_chunk(cbor_item_t* item, cbor_item_t* chunk) { CBOR_ASSERT(cbor_isa_bytestring(item)); 
CBOR_ASSERT(cbor_bytestring_is_indefinite(item)); CBOR_ASSERT(cbor_isa_bytestring(chunk)); CBOR_ASSERT(cbor_bytestring_is_definite(chunk)); - struct cbor_indefinite_string_data *data = - (struct cbor_indefinite_string_data *)item->data; + struct cbor_indefinite_string_data* data = + (struct cbor_indefinite_string_data*)item->data; if (data->chunk_count == data->chunk_capacity) { if (!_cbor_safe_to_multiply(CBOR_BUFFER_GROWTH, data->chunk_capacity)) { return false; @@ -105,8 +105,8 @@ bool cbor_bytestring_add_chunk(cbor_item_t *item, cbor_item_t *chunk) { data->chunk_capacity == 0 ? 1 : CBOR_BUFFER_GROWTH * (data->chunk_capacity); - cbor_item_t **new_chunks_data = _cbor_realloc_multiple( - data->chunks, sizeof(cbor_item_t *), new_chunk_capacity); + cbor_item_t** new_chunks_data = _cbor_realloc_multiple( + data->chunks, sizeof(cbor_item_t*), new_chunk_capacity); if (new_chunks_data == NULL) { return false; diff --git a/contrib/libcbor/src/cbor/bytestrings.h b/contrib/libcbor/src/cbor/bytestrings.h index cacd1adf95f3..59cae2add7f3 100644 --- a/contrib/libcbor/src/cbor/bytestrings.h +++ b/contrib/libcbor/src/cbor/bytestrings.h @@ -29,7 +29,7 @@ extern "C" { * @return length of the binary data. Zero if no chunk has been attached yet */ _CBOR_NODISCARD -CBOR_EXPORT size_t cbor_bytestring_length(const cbor_item_t *item); +CBOR_EXPORT size_t cbor_bytestring_length(const cbor_item_t* item); /** Is the byte string definite? * @@ -37,7 +37,7 @@ CBOR_EXPORT size_t cbor_bytestring_length(const cbor_item_t *item); * @return Is the byte string definite? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_bytestring_is_definite(const cbor_item_t *item); +CBOR_EXPORT bool cbor_bytestring_is_definite(const cbor_item_t* item); /** Is the byte string indefinite? * @@ -45,7 +45,7 @@ CBOR_EXPORT bool cbor_bytestring_is_definite(const cbor_item_t *item); * @return Is the byte string indefinite? 
*/ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_bytestring_is_indefinite(const cbor_item_t *item); +CBOR_EXPORT bool cbor_bytestring_is_indefinite(const cbor_item_t* item); /** Get the handle to the binary data * @@ -58,7 +58,7 @@ CBOR_EXPORT bool cbor_bytestring_is_indefinite(const cbor_item_t *item); * yet. */ _CBOR_NODISCARD -CBOR_EXPORT cbor_mutable_data cbor_bytestring_handle(const cbor_item_t *item); +CBOR_EXPORT cbor_mutable_data cbor_bytestring_handle(const cbor_item_t* item); /** Set the handle to the binary data * @@ -69,7 +69,7 @@ CBOR_EXPORT cbor_mutable_data cbor_bytestring_handle(const cbor_item_t *item); * @param length Length of the data block */ CBOR_EXPORT void cbor_bytestring_set_handle( - cbor_item_t *item, cbor_mutable_data CBOR_RESTRICT_POINTER data, + cbor_item_t* item, cbor_mutable_data CBOR_RESTRICT_POINTER data, size_t length); /** Get the handle to the array of chunks @@ -81,8 +81,8 @@ CBOR_EXPORT void cbor_bytestring_set_handle( * @return array of #cbor_bytestring_chunk_count definite bytestrings */ _CBOR_NODISCARD -CBOR_EXPORT cbor_item_t **cbor_bytestring_chunks_handle( - const cbor_item_t *item); +CBOR_EXPORT cbor_item_t** cbor_bytestring_chunks_handle( + const cbor_item_t* item); /** Get the number of chunks this string consist of * @@ -90,7 +90,7 @@ CBOR_EXPORT cbor_item_t **cbor_bytestring_chunks_handle( * @return The chunk count. 0 for freshly created items. */ _CBOR_NODISCARD -CBOR_EXPORT size_t cbor_bytestring_chunk_count(const cbor_item_t *item); +CBOR_EXPORT size_t cbor_bytestring_chunk_count(const cbor_item_t* item); /** Appends a chunk to the bytestring * @@ -105,8 +105,8 @@ CBOR_EXPORT size_t cbor_bytestring_chunk_count(const cbor_item_t *item); * of `chunk` is not increased and the `item` is left intact. 
*/ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_bytestring_add_chunk(cbor_item_t *item, - cbor_item_t *chunk); +CBOR_EXPORT bool cbor_bytestring_add_chunk(cbor_item_t* item, + cbor_item_t* chunk); /** Creates a new definite byte string * @@ -117,7 +117,7 @@ CBOR_EXPORT bool cbor_bytestring_add_chunk(cbor_item_t *item, * @return `NULL` if memory allocation fails */ _CBOR_NODISCARD -CBOR_EXPORT cbor_item_t *cbor_new_definite_bytestring(void); +CBOR_EXPORT cbor_item_t* cbor_new_definite_bytestring(void); /** Creates a new indefinite byte string * @@ -128,7 +128,7 @@ CBOR_EXPORT cbor_item_t *cbor_new_definite_bytestring(void); * @return `NULL` if memory allocation fails */ _CBOR_NODISCARD -CBOR_EXPORT cbor_item_t *cbor_new_indefinite_bytestring(void); +CBOR_EXPORT cbor_item_t* cbor_new_indefinite_bytestring(void); /** Creates a new byte string and initializes it * @@ -141,7 +141,7 @@ CBOR_EXPORT cbor_item_t *cbor_new_indefinite_bytestring(void); * @return `NULL` if memory allocation fails */ _CBOR_NODISCARD -CBOR_EXPORT cbor_item_t *cbor_build_bytestring(cbor_data handle, size_t length); +CBOR_EXPORT cbor_item_t* cbor_build_bytestring(cbor_data handle, size_t length); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/cbor/callbacks.c b/contrib/libcbor/src/cbor/callbacks.c index bdf3f79eee69..2b58edaa0bc0 100644 --- a/contrib/libcbor/src/cbor/callbacks.c +++ b/contrib/libcbor/src/cbor/callbacks.c @@ -7,72 +7,72 @@ #include "callbacks.h" -void cbor_null_uint8_callback(void *_CBOR_UNUSED(_ctx), - uint8_t _CBOR_UNUSED(_val)) {} +void cbor_null_uint8_callback(void* _ctx _CBOR_UNUSED, + uint8_t _CBOR_UNUSED _val) {} -void cbor_null_uint16_callback(void *_CBOR_UNUSED(_ctx), - uint16_t _CBOR_UNUSED(_val)) {} +void cbor_null_uint16_callback(void* _ctx _CBOR_UNUSED, + uint16_t _CBOR_UNUSED _val) {} -void cbor_null_uint32_callback(void *_CBOR_UNUSED(_ctx), - uint32_t _CBOR_UNUSED(_val)) {} +void cbor_null_uint32_callback(void* _ctx _CBOR_UNUSED, + uint32_t _CBOR_UNUSED _val) {} 
-void cbor_null_uint64_callback(void *_CBOR_UNUSED(_ctx), - uint64_t _CBOR_UNUSED(_val)) {} +void cbor_null_uint64_callback(void* _ctx _CBOR_UNUSED, + uint64_t _CBOR_UNUSED _val) {} -void cbor_null_negint8_callback(void *_CBOR_UNUSED(_ctx), - uint8_t _CBOR_UNUSED(_val)) {} +void cbor_null_negint8_callback(void* _ctx _CBOR_UNUSED, + uint8_t _CBOR_UNUSED _val) {} -void cbor_null_negint16_callback(void *_CBOR_UNUSED(_ctx), - uint16_t _CBOR_UNUSED(_val)) {} +void cbor_null_negint16_callback(void* _ctx _CBOR_UNUSED, + uint16_t _CBOR_UNUSED _val) {} -void cbor_null_negint32_callback(void *_CBOR_UNUSED(_ctx), - uint32_t _CBOR_UNUSED(_val)) {} +void cbor_null_negint32_callback(void* _ctx _CBOR_UNUSED, + uint32_t _CBOR_UNUSED _val) {} -void cbor_null_negint64_callback(void *_CBOR_UNUSED(_ctx), - uint64_t _CBOR_UNUSED(_val)) {} +void cbor_null_negint64_callback(void* _ctx _CBOR_UNUSED, + uint64_t _CBOR_UNUSED _val) {} -void cbor_null_string_callback(void *_CBOR_UNUSED(_ctx), - cbor_data _CBOR_UNUSED(_val), - uint64_t _CBOR_UNUSED(_val2)) {} +void cbor_null_string_callback(void* _ctx _CBOR_UNUSED, + cbor_data _CBOR_UNUSED _val, + uint64_t _CBOR_UNUSED _val2) {} -void cbor_null_string_start_callback(void *_CBOR_UNUSED(_ctx)) {} +void cbor_null_string_start_callback(void* _ctx _CBOR_UNUSED) {} -void cbor_null_byte_string_callback(void *_CBOR_UNUSED(_ctx), - cbor_data _CBOR_UNUSED(_val), - uint64_t _CBOR_UNUSED(_val2)) {} +void cbor_null_byte_string_callback(void* _ctx _CBOR_UNUSED, + cbor_data _CBOR_UNUSED _val, + uint64_t _CBOR_UNUSED _val2) {} -void cbor_null_byte_string_start_callback(void *_CBOR_UNUSED(_ctx)) {} +void cbor_null_byte_string_start_callback(void* _ctx _CBOR_UNUSED) {} -void cbor_null_array_start_callback(void *_CBOR_UNUSED(_ctx), - uint64_t _CBOR_UNUSED(_val)) {} +void cbor_null_array_start_callback(void* _ctx _CBOR_UNUSED, + uint64_t _CBOR_UNUSED _val) {} -void cbor_null_indef_array_start_callback(void *_CBOR_UNUSED(_ctx)) {} +void 
cbor_null_indef_array_start_callback(void* _ctx _CBOR_UNUSED) {} -void cbor_null_map_start_callback(void *_CBOR_UNUSED(_ctx), - uint64_t _CBOR_UNUSED(_val)) {} +void cbor_null_map_start_callback(void* _ctx _CBOR_UNUSED, + uint64_t _CBOR_UNUSED _val) {} -void cbor_null_indef_map_start_callback(void *_CBOR_UNUSED(_ctx)) {} +void cbor_null_indef_map_start_callback(void* _ctx _CBOR_UNUSED) {} -void cbor_null_tag_callback(void *_CBOR_UNUSED(_ctx), - uint64_t _CBOR_UNUSED(_val)) {} +void cbor_null_tag_callback(void* _ctx _CBOR_UNUSED, + uint64_t _CBOR_UNUSED _val) {} -void cbor_null_float2_callback(void *_CBOR_UNUSED(_ctx), - float _CBOR_UNUSED(_val)) {} +void cbor_null_float2_callback(void* _ctx _CBOR_UNUSED, + float _CBOR_UNUSED _val) {} -void cbor_null_float4_callback(void *_CBOR_UNUSED(_ctx), - float _CBOR_UNUSED(_val)) {} +void cbor_null_float4_callback(void* _ctx _CBOR_UNUSED, + float _CBOR_UNUSED _val) {} -void cbor_null_float8_callback(void *_CBOR_UNUSED(_ctx), - double _CBOR_UNUSED(_val)) {} +void cbor_null_float8_callback(void* _ctx _CBOR_UNUSED, + double _CBOR_UNUSED _val) {} -void cbor_null_null_callback(void *_CBOR_UNUSED(_ctx)) {} +void cbor_null_null_callback(void* _ctx _CBOR_UNUSED) {} -void cbor_null_undefined_callback(void *_CBOR_UNUSED(_ctx)) {} +void cbor_null_undefined_callback(void* _ctx _CBOR_UNUSED) {} -void cbor_null_boolean_callback(void *_CBOR_UNUSED(_ctx), - bool _CBOR_UNUSED(_val)) {} +void cbor_null_boolean_callback(void* _ctx _CBOR_UNUSED, + bool _CBOR_UNUSED _val) {} -void cbor_null_indef_break_callback(void *_CBOR_UNUSED(_ctx)) {} +void cbor_null_indef_break_callback(void* _ctx _CBOR_UNUSED) {} CBOR_EXPORT const struct cbor_callbacks cbor_empty_callbacks = { /* Type 0 - Unsigned integers */ diff --git a/contrib/libcbor/src/cbor/callbacks.h b/contrib/libcbor/src/cbor/callbacks.h index c7ae20568dc8..f5fab43bc711 100644 --- a/contrib/libcbor/src/cbor/callbacks.h +++ b/contrib/libcbor/src/cbor/callbacks.h @@ -18,34 +18,34 @@ extern "C" { 
#endif /** Callback prototype */ -typedef void (*cbor_int8_callback)(void *, uint8_t); +typedef void (*cbor_int8_callback)(void*, uint8_t); /** Callback prototype */ -typedef void (*cbor_int16_callback)(void *, uint16_t); +typedef void (*cbor_int16_callback)(void*, uint16_t); /** Callback prototype */ -typedef void (*cbor_int32_callback)(void *, uint32_t); +typedef void (*cbor_int32_callback)(void*, uint32_t); /** Callback prototype */ -typedef void (*cbor_int64_callback)(void *, uint64_t); +typedef void (*cbor_int64_callback)(void*, uint64_t); /** Callback prototype */ -typedef void (*cbor_simple_callback)(void *); +typedef void (*cbor_simple_callback)(void*); /** Callback prototype */ -typedef void (*cbor_string_callback)(void *, cbor_data, uint64_t); +typedef void (*cbor_string_callback)(void*, cbor_data, uint64_t); /** Callback prototype */ -typedef void (*cbor_collection_callback)(void *, uint64_t); +typedef void (*cbor_collection_callback)(void*, uint64_t); /** Callback prototype */ -typedef void (*cbor_float_callback)(void *, float); +typedef void (*cbor_float_callback)(void*, float); /** Callback prototype */ -typedef void (*cbor_double_callback)(void *, double); +typedef void (*cbor_double_callback)(void*, double); /** Callback prototype */ -typedef void (*cbor_bool_callback)(void *, bool); +typedef void (*cbor_bool_callback)(void*, bool); /** Callback bundle -- passed to the decoder */ struct cbor_callbacks { @@ -108,76 +108,76 @@ struct cbor_callbacks { }; /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_uint8_callback(void *, uint8_t); +CBOR_EXPORT void cbor_null_uint8_callback(void*, uint8_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_uint16_callback(void *, uint16_t); +CBOR_EXPORT void cbor_null_uint16_callback(void*, uint16_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_uint32_callback(void *, uint32_t); +CBOR_EXPORT void 
cbor_null_uint32_callback(void*, uint32_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_uint64_callback(void *, uint64_t); +CBOR_EXPORT void cbor_null_uint64_callback(void*, uint64_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_negint8_callback(void *, uint8_t); +CBOR_EXPORT void cbor_null_negint8_callback(void*, uint8_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_negint16_callback(void *, uint16_t); +CBOR_EXPORT void cbor_null_negint16_callback(void*, uint16_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_negint32_callback(void *, uint32_t); +CBOR_EXPORT void cbor_null_negint32_callback(void*, uint32_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_negint64_callback(void *, uint64_t); +CBOR_EXPORT void cbor_null_negint64_callback(void*, uint64_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_string_callback(void *, cbor_data, uint64_t); +CBOR_EXPORT void cbor_null_string_callback(void*, cbor_data, uint64_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_string_start_callback(void *); +CBOR_EXPORT void cbor_null_string_start_callback(void*); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_byte_string_callback(void *, cbor_data, uint64_t); +CBOR_EXPORT void cbor_null_byte_string_callback(void*, cbor_data, uint64_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_byte_string_start_callback(void *); +CBOR_EXPORT void cbor_null_byte_string_start_callback(void*); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_array_start_callback(void *, uint64_t); +CBOR_EXPORT void cbor_null_array_start_callback(void*, uint64_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_indef_array_start_callback(void *); 
+CBOR_EXPORT void cbor_null_indef_array_start_callback(void*); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_map_start_callback(void *, uint64_t); +CBOR_EXPORT void cbor_null_map_start_callback(void*, uint64_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_indef_map_start_callback(void *); +CBOR_EXPORT void cbor_null_indef_map_start_callback(void*); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_tag_callback(void *, uint64_t); +CBOR_EXPORT void cbor_null_tag_callback(void*, uint64_t); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_float2_callback(void *, float); +CBOR_EXPORT void cbor_null_float2_callback(void*, float); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_float4_callback(void *, float); +CBOR_EXPORT void cbor_null_float4_callback(void*, float); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_float8_callback(void *, double); +CBOR_EXPORT void cbor_null_float8_callback(void*, double); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_null_callback(void *); +CBOR_EXPORT void cbor_null_null_callback(void*); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_undefined_callback(void *); +CBOR_EXPORT void cbor_null_undefined_callback(void*); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_boolean_callback(void *, bool); +CBOR_EXPORT void cbor_null_boolean_callback(void*, bool); /** Dummy callback implementation - does nothing */ -CBOR_EXPORT void cbor_null_indef_break_callback(void *); +CBOR_EXPORT void cbor_null_indef_break_callback(void*); /** Dummy callback bundle - does nothing */ CBOR_EXPORT extern const struct cbor_callbacks cbor_empty_callbacks; diff --git a/contrib/libcbor/src/cbor/common.c b/contrib/libcbor/src/cbor/common.c index efbd37ed79d3..1931b572fb32 100644 
--- a/contrib/libcbor/src/cbor/common.c +++ b/contrib/libcbor/src/cbor/common.c @@ -19,69 +19,75 @@ bool _cbor_enable_assert = true; #endif -bool cbor_isa_uint(const cbor_item_t *item) { - return item->type == CBOR_TYPE_UINT; +cbor_type cbor_typeof(const cbor_item_t* item) { + CBOR_ASSERT(item != NULL); + CBOR_ASSERT_VALID_TYPE(item->type); + return item->type; } -bool cbor_isa_negint(const cbor_item_t *item) { - return item->type == CBOR_TYPE_NEGINT; +bool cbor_isa_uint(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_UINT; } -bool cbor_isa_bytestring(const cbor_item_t *item) { - return item->type == CBOR_TYPE_BYTESTRING; +bool cbor_isa_negint(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_NEGINT; } -bool cbor_isa_string(const cbor_item_t *item) { - return item->type == CBOR_TYPE_STRING; +bool cbor_isa_bytestring(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_BYTESTRING; } -bool cbor_isa_array(const cbor_item_t *item) { - return item->type == CBOR_TYPE_ARRAY; +bool cbor_isa_string(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_STRING; } -bool cbor_isa_map(const cbor_item_t *item) { - return item->type == CBOR_TYPE_MAP; +bool cbor_isa_array(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_ARRAY; } -bool cbor_isa_tag(const cbor_item_t *item) { - return item->type == CBOR_TYPE_TAG; +bool cbor_isa_map(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_MAP; } -bool cbor_isa_float_ctrl(const cbor_item_t *item) { - return item->type == CBOR_TYPE_FLOAT_CTRL; +bool cbor_isa_tag(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_TAG; } -cbor_type cbor_typeof(const cbor_item_t *item) { return item->type; } +bool cbor_isa_float_ctrl(const cbor_item_t* item) { + return cbor_typeof(item) == CBOR_TYPE_FLOAT_CTRL; +} -bool cbor_is_int(const cbor_item_t *item) { +bool cbor_is_int(const cbor_item_t* item) { return cbor_isa_uint(item) || 
cbor_isa_negint(item); } -bool cbor_is_bool(const cbor_item_t *item) { - return cbor_isa_float_ctrl(item) && +bool cbor_is_bool(const cbor_item_t* item) { + return cbor_isa_float_ctrl(item) && cbor_float_ctrl_is_ctrl(item) && (cbor_ctrl_value(item) == CBOR_CTRL_FALSE || cbor_ctrl_value(item) == CBOR_CTRL_TRUE); } -bool cbor_is_null(const cbor_item_t *item) { - return cbor_isa_float_ctrl(item) && cbor_ctrl_value(item) == CBOR_CTRL_NULL; +bool cbor_is_null(const cbor_item_t* item) { + return cbor_isa_float_ctrl(item) && cbor_float_ctrl_is_ctrl(item) && + cbor_ctrl_value(item) == CBOR_CTRL_NULL; } -bool cbor_is_undef(const cbor_item_t *item) { - return cbor_isa_float_ctrl(item) && cbor_ctrl_value(item) == CBOR_CTRL_UNDEF; +bool cbor_is_undef(const cbor_item_t* item) { + return cbor_isa_float_ctrl(item) && cbor_float_ctrl_is_ctrl(item) && + cbor_ctrl_value(item) == CBOR_CTRL_UNDEF; } -bool cbor_is_float(const cbor_item_t *item) { +bool cbor_is_float(const cbor_item_t* item) { return cbor_isa_float_ctrl(item) && !cbor_float_ctrl_is_ctrl(item); } -cbor_item_t *cbor_incref(cbor_item_t *item) { +cbor_item_t* cbor_incref(cbor_item_t* item) { item->refcount++; return item; } -void cbor_decref(cbor_item_t **item_ref) { - cbor_item_t *item = *item_ref; +void cbor_decref(cbor_item_t** item_ref) { + cbor_item_t* item = *item_ref; CBOR_ASSERT(item->refcount > 0); if (--item->refcount == 0) { switch (item->type) { @@ -95,11 +101,10 @@ void cbor_decref(cbor_item_t **item_ref) { _cbor_free(item->data); } else { /* We need to decref all chunks */ - cbor_item_t **handle = cbor_bytestring_chunks_handle(item); + cbor_item_t** handle = cbor_bytestring_chunks_handle(item); for (size_t i = 0; i < cbor_bytestring_chunk_count(item); i++) cbor_decref(&handle[i]); - _cbor_free( - ((struct cbor_indefinite_string_data *)item->data)->chunks); + _cbor_free(((struct cbor_indefinite_string_data*)item->data)->chunks); _cbor_free(item->data); } break; @@ -109,18 +114,17 @@ void cbor_decref(cbor_item_t 
**item_ref) { _cbor_free(item->data); } else { /* We need to decref all chunks */ - cbor_item_t **handle = cbor_string_chunks_handle(item); + cbor_item_t** handle = cbor_string_chunks_handle(item); for (size_t i = 0; i < cbor_string_chunk_count(item); i++) cbor_decref(&handle[i]); - _cbor_free( - ((struct cbor_indefinite_string_data *)item->data)->chunks); + _cbor_free(((struct cbor_indefinite_string_data*)item->data)->chunks); _cbor_free(item->data); } break; } case CBOR_TYPE_ARRAY: { /* Get all items and decref them */ - cbor_item_t **handle = cbor_array_handle(item); + cbor_item_t** handle = cbor_array_handle(item); size_t size = cbor_array_size(item); for (size_t i = 0; i < size; i++) if (handle[i] != NULL) cbor_decref(&handle[i]); @@ -128,7 +132,7 @@ void cbor_decref(cbor_item_t **item_ref) { break; } case CBOR_TYPE_MAP: { - struct cbor_pair *handle = cbor_map_handle(item); + struct cbor_pair* handle = cbor_map_handle(item); for (size_t i = 0; i < item->metadata.map_metadata.end_ptr; i++, handle++) { cbor_decref(&handle->key); @@ -153,11 +157,11 @@ void cbor_decref(cbor_item_t **item_ref) { } } -void cbor_intermediate_decref(cbor_item_t *item) { cbor_decref(&item); } +void cbor_intermediate_decref(cbor_item_t* item) { cbor_decref(&item); } -size_t cbor_refcount(const cbor_item_t *item) { return item->refcount; } +size_t cbor_refcount(const cbor_item_t* item) { return item->refcount; } -cbor_item_t *cbor_move(cbor_item_t *item) { +cbor_item_t* cbor_move(cbor_item_t* item) { item->refcount--; return item; } diff --git a/contrib/libcbor/src/cbor/common.h b/contrib/libcbor/src/cbor/common.h index 1d0b426cff4c..d16002d9b651 100644 --- a/contrib/libcbor/src/cbor/common.h +++ b/contrib/libcbor/src/cbor/common.h @@ -77,25 +77,42 @@ extern bool _cbor_enable_assert; } while (0) #endif +#define CBOR_ASSERT_VALID_TYPE(item_type) \ + CBOR_ASSERT(item_type >= CBOR_TYPE_UINT && item_type <= CBOR_TYPE_FLOAT_CTRL); + #define _CBOR_TO_STR_(x) #x #define _CBOR_TO_STR(x) 
_CBOR_TO_STR_(x) /* enables proper double expansion */ +#ifdef CBOR_HAS_NODISCARD_ATTRIBUTE +#define CBOR_NODISCARD [[nodiscard]] +#else +#define CBOR_NODISCARD +#endif + #ifdef __GNUC__ -#define _CBOR_UNUSED(x) __attribute__((__unused__)) x -// TODO(https://github.com/PJK/libcbor/issues/247): Prefer [[nodiscard]] if -// available +#define _CBOR_UNUSED __attribute__((__unused__)) +// Fall back to __attribute__((warn_unused_result)) if we don't have +// [[nodiscard]] +#ifndef CBOR_HAS_NODISCARD_ATTRIBUTE #define _CBOR_NODISCARD __attribute__((warn_unused_result)) +#endif #elif defined(_MSC_VER) -#define _CBOR_UNUSED(x) __pragma(warning(suppress : 4100 4101)) x +#define _CBOR_UNUSED __pragma(warning(suppress : 4100 4101)) #define _CBOR_NODISCARD #else -#define _CBOR_UNUSED(x) x +#define _CBOR_UNUSED #define _CBOR_NODISCARD #endif -typedef void *(*_cbor_malloc_t)(size_t); -typedef void *(*_cbor_realloc_t)(void *, size_t); -typedef void (*_cbor_free_t)(void *); +#ifdef CBOR_HAS_BUILTIN_UNREACHABLE +#define _CBOR_UNREACHABLE __builtin_unreachable() +#else +#define _CBOR_UNREACHABLE +#endif + +typedef void* (*_cbor_malloc_t)(size_t); +typedef void* (*_cbor_realloc_t)(void*, size_t); +typedef void (*_cbor_free_t)(void*); CBOR_EXPORT extern _cbor_malloc_t _cbor_malloc; CBOR_EXPORT extern _cbor_realloc_t _cbor_realloc; @@ -109,7 +126,8 @@ CBOR_EXPORT extern _cbor_free_t _cbor_free; } \ } while (0) -// Macro to short-circuit builders when memory allocation of nested data fails +// Macro to short-circuit builders when memory allocation of nested data +// fails #define _CBOR_DEPENDENT_NOTNULL(cbor_item, pointer) \ do { \ if (pointer == NULL) { \ @@ -120,18 +138,21 @@ CBOR_EXPORT extern _cbor_free_t _cbor_free; /** Sets the memory management routines to use. * - * By default, libcbor will use the standard library `malloc`, `realloc`, and - * `free`. + * By default, libcbor will use the standard library `malloc`, `realloc`, + * and `free`. * * \rst - * .. 
warning:: This function modifies the global state and should therefore be - * used accordingly. Changing the memory handlers while allocated items exist - * will result in a ``free``/``malloc`` mismatch. This function is not thread - * safe with respect to both itself and all the other *libcbor* functions that - * work with the heap. + * .. warning:: + * This function modifies the global state and should + * therefore be used accordingly. Changing the memory handlers while + * allocated items exist will result in a ``free``/``malloc`` mismatch. + * This function is not thread safe with respect to both itself and all + * the other *libcbor* functions that work with the heap. + * + * .. note:: + * `realloc` implementation must correctly support `NULL` + * reallocation (see e.g. http://en.cppreference.com/w/c/memory/realloc) * - * .. note:: `realloc` implementation must correctly support `NULL` reallocation - * (see e.g. http://en.cppreference.com/w/c/memory/realloc) * \endrst * * @param custom_malloc malloc implementation @@ -155,7 +176,7 @@ CBOR_EXPORT void cbor_set_allocs(_cbor_malloc_t custom_malloc, */ _CBOR_NODISCARD CBOR_EXPORT cbor_type cbor_typeof( - const cbor_item_t *item); /* Will be inlined iff link-time opt is enabled */ + const cbor_item_t* item); /* Will be inlined iff link-time opt is enabled */ /* Standard CBOR Major item types */ @@ -164,56 +185,56 @@ CBOR_EXPORT cbor_type cbor_typeof( * @return Is the item an #CBOR_TYPE_UINT? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_uint(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_uint(const cbor_item_t* item); /** Does the item have the appropriate major type? * @param item the item * @return Is the item a #CBOR_TYPE_NEGINT? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_negint(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_negint(const cbor_item_t* item); /** Does the item have the appropriate major type? * @param item the item * @return Is the item a #CBOR_TYPE_BYTESTRING? 
*/ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_bytestring(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_bytestring(const cbor_item_t* item); /** Does the item have the appropriate major type? * @param item the item * @return Is the item a #CBOR_TYPE_STRING? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_string(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_string(const cbor_item_t* item); /** Does the item have the appropriate major type? * @param item the item * @return Is the item an #CBOR_TYPE_ARRAY? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_array(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_array(const cbor_item_t* item); /** Does the item have the appropriate major type? * @param item the item * @return Is the item a #CBOR_TYPE_MAP? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_map(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_map(const cbor_item_t* item); /** Does the item have the appropriate major type? * @param item the item * @return Is the item a #CBOR_TYPE_TAG? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_tag(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_tag(const cbor_item_t* item); /** Does the item have the appropriate major type? * @param item the item * @return Is the item a #CBOR_TYPE_FLOAT_CTRL? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_isa_float_ctrl(const cbor_item_t *item); +CBOR_EXPORT bool cbor_isa_float_ctrl(const cbor_item_t* item); /* Practical types with respect to their semantics (but not tag values) */ @@ -222,47 +243,48 @@ CBOR_EXPORT bool cbor_isa_float_ctrl(const cbor_item_t *item); * @return Is the item an integer, either positive or negative? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_is_int(const cbor_item_t *item); +CBOR_EXPORT bool cbor_is_int(const cbor_item_t* item); /** Is the item an a floating point number? * @param item the item * @return Is the item a floating point number? 
*/ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_is_float(const cbor_item_t *item); +CBOR_EXPORT bool cbor_is_float(const cbor_item_t* item); /** Is the item an a boolean? * @param item the item * @return Is the item a boolean? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_is_bool(const cbor_item_t *item); +CBOR_EXPORT bool cbor_is_bool(const cbor_item_t* item); /** Does this item represent `null` * * \rst - * .. warning:: This is in no way related to the value of the pointer. Passing a - * null pointer will most likely result in a crash. + * .. warning:: + * This is in no way related to the value of the pointer. + * Passing a null pointer will most likely result in a crash. * \endrst * * @param item the item * @return Is the item (CBOR logical) null? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_is_null(const cbor_item_t *item); +CBOR_EXPORT bool cbor_is_null(const cbor_item_t* item); /** Does this item represent `undefined` * * \rst - * .. warning:: Care must be taken to distinguish nulls and undefined values in - * C. + * .. warning:: + * Care must be taken to distinguish nulls and undefined values in C. * \endrst * * @param item the item * @return Is the item (CBOR logical) undefined? */ _CBOR_NODISCARD -CBOR_EXPORT bool cbor_is_undef(const cbor_item_t *item); +CBOR_EXPORT bool cbor_is_undef(const cbor_item_t* item); /* * ============================================================================ @@ -272,38 +294,41 @@ CBOR_EXPORT bool cbor_is_undef(const cbor_item_t *item); /** Increases the item's reference count by one * - * Constant complexity; items referring to this one or items being referred to - * are not updated. + * Constant complexity; items referring to this one or items being + * referred to are not updated. * * This function can be used to extend reference counting to client code. 
* * @param item Reference to an item * @return The input \p item */ -CBOR_EXPORT cbor_item_t *cbor_incref(cbor_item_t *item); +CBOR_EXPORT cbor_item_t* cbor_incref(cbor_item_t* item); -/** Decreases the item's reference count by one, deallocating the item if needed +/** Decreases the item's reference count by one, deallocating the item if + * needed * - * In case the item is deallocated, the reference count of all items this item - * references will also be #cbor_decref 'ed recursively. + * In case the item is deallocated, the reference count of all items this + * item references will also be #cbor_decref 'ed recursively. * * @param item Reference to an item. Will be set to `NULL` if deallocated */ -CBOR_EXPORT void cbor_decref(cbor_item_t **item); +CBOR_EXPORT void cbor_decref(cbor_item_t** item); -/** Decreases the item's reference count by one, deallocating the item if needed - * - * Convenience wrapper for #cbor_decref when its set-to-null behavior is not +/** Decreases the item's reference count by one, deallocating the item if * needed * + * Convenience wrapper for #cbor_decref when its set-to-null behavior is + * not needed + * * @param item Reference to an item */ -CBOR_EXPORT void cbor_intermediate_decref(cbor_item_t *item); +CBOR_EXPORT void cbor_intermediate_decref(cbor_item_t* item); /** Get the item's reference count * * \rst - * .. warning:: This does *not* account for transitive references. + * .. warning:: + * This does *not* account for transitive references. * \endrst * * @todo Add some inline examples for reference counting @@ -312,25 +337,26 @@ CBOR_EXPORT void cbor_intermediate_decref(cbor_item_t *item); * @return the reference count */ _CBOR_NODISCARD -CBOR_EXPORT size_t cbor_refcount(const cbor_item_t *item); +CBOR_EXPORT size_t cbor_refcount(const cbor_item_t* item); /** Provides CPP-like move construct * - * Decreases the reference count by one, but does not deallocate the item even - * if its refcount reaches zero. 
This is useful for passing intermediate values - * to functions that increase reference count. Should only be used with - * functions that `incref` their arguments. + * Decreases the reference count by one, but does not deallocate the item + * even if its refcount reaches zero. This is useful for passing + * intermediate values to functions that increase reference count. Should + * only be used with functions that `incref` their arguments. * * \rst - * .. warning:: If the item is moved without correctly increasing the reference - * count afterwards, the memory will be leaked. + * .. warning:: + * If the item is moved without correctly increasing the + * reference count afterwards, the memory will be leaked. * \endrst * * @param item Reference to an item * @return the item with reference count decreased by one */ _CBOR_NODISCARD -CBOR_EXPORT cbor_item_t *cbor_move(cbor_item_t *item); +CBOR_EXPORT cbor_item_t* cbor_move(cbor_item_t* item); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/cbor/encoding.c b/contrib/libcbor/src/cbor/encoding.c index 9d931d17570f..76be4de9da3d 100644 --- a/contrib/libcbor/src/cbor/encoding.c +++ b/contrib/libcbor/src/cbor/encoding.c @@ -6,64 +6,67 @@ */ #include "encoding.h" + +#include <math.h> + #include "internal/encoders.h" -size_t cbor_encode_uint8(uint8_t value, unsigned char *buffer, +size_t cbor_encode_uint8(uint8_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint8(value, buffer, buffer_size, 0x00); } -size_t cbor_encode_uint16(uint16_t value, unsigned char *buffer, +size_t cbor_encode_uint16(uint16_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint16(value, buffer, buffer_size, 0x00); } -size_t cbor_encode_uint32(uint32_t value, unsigned char *buffer, +size_t cbor_encode_uint32(uint32_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint32(value, buffer, buffer_size, 0x00); } -size_t cbor_encode_uint64(uint64_t value, unsigned char *buffer, 
+size_t cbor_encode_uint64(uint64_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint64(value, buffer, buffer_size, 0x00); } -size_t cbor_encode_uint(uint64_t value, unsigned char *buffer, +size_t cbor_encode_uint(uint64_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint(value, buffer, buffer_size, 0x00); } -size_t cbor_encode_negint8(uint8_t value, unsigned char *buffer, +size_t cbor_encode_negint8(uint8_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint8(value, buffer, buffer_size, 0x20); } -size_t cbor_encode_negint16(uint16_t value, unsigned char *buffer, +size_t cbor_encode_negint16(uint16_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint16(value, buffer, buffer_size, 0x20); } -size_t cbor_encode_negint32(uint32_t value, unsigned char *buffer, +size_t cbor_encode_negint32(uint32_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint32(value, buffer, buffer_size, 0x20); } -size_t cbor_encode_negint64(uint64_t value, unsigned char *buffer, +size_t cbor_encode_negint64(uint64_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint64(value, buffer, buffer_size, 0x20); } -size_t cbor_encode_negint(uint64_t value, unsigned char *buffer, +size_t cbor_encode_negint(uint64_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint(value, buffer, buffer_size, 0x20); } -size_t cbor_encode_bytestring_start(size_t length, unsigned char *buffer, +size_t cbor_encode_bytestring_start(size_t length, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint((size_t)length, buffer, buffer_size, 0x40); } -size_t _cbor_encode_byte(uint8_t value, unsigned char *buffer, +size_t _cbor_encode_byte(uint8_t value, unsigned char* buffer, size_t buffer_size) { if (buffer_size >= 1) { buffer[0] = value; @@ -72,60 +75,61 @@ size_t _cbor_encode_byte(uint8_t value, unsigned char *buffer, return 0; } 
-size_t cbor_encode_indef_bytestring_start(unsigned char *buffer, +size_t cbor_encode_indef_bytestring_start(unsigned char* buffer, size_t buffer_size) { return _cbor_encode_byte(0x5F, buffer, buffer_size); } -size_t cbor_encode_string_start(size_t length, unsigned char *buffer, +size_t cbor_encode_string_start(size_t length, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint((size_t)length, buffer, buffer_size, 0x60); } -size_t cbor_encode_indef_string_start(unsigned char *buffer, +size_t cbor_encode_indef_string_start(unsigned char* buffer, size_t buffer_size) { return _cbor_encode_byte(0x7F, buffer, buffer_size); } -size_t cbor_encode_array_start(size_t length, unsigned char *buffer, +size_t cbor_encode_array_start(size_t length, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint((size_t)length, buffer, buffer_size, 0x80); } -size_t cbor_encode_indef_array_start(unsigned char *buffer, +size_t cbor_encode_indef_array_start(unsigned char* buffer, size_t buffer_size) { return _cbor_encode_byte(0x9F, buffer, buffer_size); } -size_t cbor_encode_map_start(size_t length, unsigned char *buffer, +size_t cbor_encode_map_start(size_t length, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint((size_t)length, buffer, buffer_size, 0xA0); } -size_t cbor_encode_indef_map_start(unsigned char *buffer, size_t buffer_size) { +size_t cbor_encode_indef_map_start(unsigned char* buffer, size_t buffer_size) { return _cbor_encode_byte(0xBF, buffer, buffer_size); } -size_t cbor_encode_tag(uint64_t value, unsigned char *buffer, +size_t cbor_encode_tag(uint64_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint(value, buffer, buffer_size, 0xC0); } -size_t cbor_encode_bool(bool value, unsigned char *buffer, size_t buffer_size) { +size_t cbor_encode_bool(bool value, unsigned char* buffer, size_t buffer_size) { return value ? 
_cbor_encode_byte(0xF5, buffer, buffer_size) : _cbor_encode_byte(0xF4, buffer, buffer_size); } -size_t cbor_encode_null(unsigned char *buffer, size_t buffer_size) { +size_t cbor_encode_null(unsigned char* buffer, size_t buffer_size) { return _cbor_encode_byte(0xF6, buffer, buffer_size); } -size_t cbor_encode_undef(unsigned char *buffer, size_t buffer_size) { +size_t cbor_encode_undef(unsigned char* buffer, size_t buffer_size) { return _cbor_encode_byte(0xF7, buffer, buffer_size); } -size_t cbor_encode_half(float value, unsigned char *buffer, +size_t cbor_encode_half(float value, unsigned char* buffer, size_t buffer_size) { + // TODO: Broken on systems that do not use IEEE 754 /* Assuming value is normalized */ uint32_t val = ((union _cbor_float_helper){.as_float = value}).as_uint; uint16_t res; @@ -134,11 +138,8 @@ size_t cbor_encode_half(float value, unsigned char *buffer, uint32_t mant = val & 0x7FFFFFu; /* 0b0000_0000_0111_1111_1111_1111_1111_1111 */ if (exp == 0xFF) { /* Infinity or NaNs */ - if (value != value) { - // We discard information bits in half-float NaNs. This is - // not required for the core CBOR protocol (it is only a suggestion in - // Section 3.9). - // See https://github.com/PJK/libcbor/issues/215 + if (isnan(value)) { + // Note: Values of signaling NaNs are discarded. See `cbor_encode_single`. res = (uint16_t)0x007e00; } else { // If the mantissa is non-zero, we have a NaN, but those are handled @@ -176,25 +177,38 @@ size_t cbor_encode_half(float value, unsigned char *buffer, return _cbor_encode_uint16(res, buffer, buffer_size, 0xE0); } -size_t cbor_encode_single(float value, unsigned char *buffer, +size_t cbor_encode_single(float value, unsigned char* buffer, size_t buffer_size) { + // Note: Values of signaling NaNs are discarded. There is no standard + // way to extract it without assumptions about the internal float + // representation. 
+ if (isnan(value)) { + return _cbor_encode_uint32(0x7FC0 << 16, buffer, buffer_size, 0xE0); + } + // TODO: Broken on systems that do not use IEEE 754 return _cbor_encode_uint32( ((union _cbor_float_helper){.as_float = value}).as_uint, buffer, buffer_size, 0xE0); } -size_t cbor_encode_double(double value, unsigned char *buffer, +size_t cbor_encode_double(double value, unsigned char* buffer, size_t buffer_size) { + // Note: Values of signaling NaNs are discarded. See `cbor_encode_single`. + if (isnan(value)) { + return _cbor_encode_uint64((uint64_t)0x7FF8 << 48, buffer, buffer_size, + 0xE0); + } + // TODO: Broken on systems that do not use IEEE 754 return _cbor_encode_uint64( ((union _cbor_double_helper){.as_double = value}).as_uint, buffer, buffer_size, 0xE0); } -size_t cbor_encode_break(unsigned char *buffer, size_t buffer_size) { +size_t cbor_encode_break(unsigned char* buffer, size_t buffer_size) { return _cbor_encode_byte(0xFF, buffer, buffer_size); } -size_t cbor_encode_ctrl(uint8_t value, unsigned char *buffer, +size_t cbor_encode_ctrl(uint8_t value, unsigned char* buffer, size_t buffer_size) { return _cbor_encode_uint8(value, buffer, buffer_size, 0xE0); } diff --git a/contrib/libcbor/src/cbor/encoding.h b/contrib/libcbor/src/cbor/encoding.h index bcc04f8a98e5..c05bbc545737 100644 --- a/contrib/libcbor/src/cbor/encoding.h +++ b/contrib/libcbor/src/cbor/encoding.h @@ -27,76 +27,72 @@ extern "C" { * case it is not modified). 
*/ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint8(uint8_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint8(uint8_t, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint16(uint16_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint16(uint16_t, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint32(uint32_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint32(uint32_t, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint64(uint64_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint64(uint64_t, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint(uint64_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_uint(uint64_t, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_negint8(uint8_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_negint8(uint8_t, unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_negint16(uint16_t, - unsigned char *, - size_t); + unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_negint32(uint32_t, - unsigned char *, - size_t); + unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_negint64(uint64_t, - unsigned char *, - size_t); + unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_negint(uint64_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_negint(uint64_t, unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_bytestring_start(size_t, - unsigned char *, + unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t -cbor_encode_indef_bytestring_start(unsigned char *, size_t); +cbor_encode_indef_bytestring_start(unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_string_start(size_t, - unsigned char *, + unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t 
-cbor_encode_indef_string_start(unsigned char *, size_t); +cbor_encode_indef_string_start(unsigned char*, size_t); _CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_array_start(size_t, - unsigned char *, + unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t -cbor_encode_indef_array_start(unsigned char *, size_t); +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_indef_array_start(unsigned char*, + size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_map_start(size_t, - unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_map_start(size_t, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_indef_map_start(unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_indef_map_start(unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_tag(uint64_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_tag(uint64_t, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_bool(bool, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_bool(bool, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_null(unsigned char *, size_t); +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_null(unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_undef(unsigned char *, size_t); +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_undef(unsigned char*, size_t); /** Encodes a half-precision float * @@ -118,19 +114,28 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_undef(unsigned char *, size_t); * lost. * - In all other cases, the sign bit, the exponent, and 10 most significant * bits of the significand are kept + * + * Note: Signaling NaNs are encoded as a standard, "quiet" NaN. 
*/ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_half(float, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_half(float, unsigned char*, size_t); - -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_single(float, unsigned char *, +/** Encodes a single precision float + * + * Note: Signaling NaNs are encoded as a standard, "quiet" NaN. + */ +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_single(float, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_double(double, unsigned char *, +/** Encodes a double precision float + * + * Note: Signaling NaNs are encoded as a standard, "quiet" NaN. + */ +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_double(double, unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_break(unsigned char *, size_t); +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_break(unsigned char*, size_t); -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_ctrl(uint8_t, unsigned char *, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_encode_ctrl(uint8_t, unsigned char*, size_t); #ifdef __cplusplus diff --git a/contrib/libcbor/src/cbor/floats_ctrls.c b/contrib/libcbor/src/cbor/floats_ctrls.c index 57bf477d4d3d..cae4abee2186 100644 --- a/contrib/libcbor/src/cbor/floats_ctrls.c +++ b/contrib/libcbor/src/cbor/floats_ctrls.c @@ -9,43 +9,44 @@ #include <math.h> #include "assert.h" -cbor_float_width cbor_float_get_width(const cbor_item_t *item) { +cbor_float_width cbor_float_get_width(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_float_ctrl(item)); return item->metadata.float_ctrl_metadata.width; } -uint8_t cbor_ctrl_value(const cbor_item_t *item) { +uint8_t cbor_ctrl_value(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_float_ctrl(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_0); return item->metadata.float_ctrl_metadata.ctrl; } -bool cbor_float_ctrl_is_ctrl(const cbor_item_t *item) { +bool cbor_float_ctrl_is_ctrl(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_float_ctrl(item)); return 
cbor_float_get_width(item) == CBOR_FLOAT_0; } -float cbor_float_get_float2(const cbor_item_t *item) { +float cbor_float_get_float2(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_float(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_16); - return *(float *)item->data; + return *(float*)item->data; } -float cbor_float_get_float4(const cbor_item_t *item) { +float cbor_float_get_float4(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_float(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_32); - return *(float *)item->data; + return *(float*)item->data; } -double cbor_float_get_float8(const cbor_item_t *item) { +double cbor_float_get_float8(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_float(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_64); - return *(double *)item->data; + return *(double*)item->data; } -double cbor_float_get_float(const cbor_item_t *item) { - CBOR_ASSERT(cbor_is_float(item)); - // cppcheck-suppress missingReturn +double cbor_float_get_float(const cbor_item_t* item) { + CBOR_ASSERT(cbor_isa_float_ctrl(item)); + CBOR_ASSERT(cbor_float_get_width(item) >= CBOR_FLOAT_0 && + cbor_float_get_width(item) <= CBOR_FLOAT_64); switch (cbor_float_get_width(item)) { case CBOR_FLOAT_0: return NAN; @@ -55,46 +56,49 @@ double cbor_float_get_float(const cbor_item_t *item) { return cbor_float_get_float4(item); case CBOR_FLOAT_64: return cbor_float_get_float8(item); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } } -bool cbor_get_bool(const cbor_item_t *item) { +bool cbor_get_bool(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_bool(item)); return item->metadata.float_ctrl_metadata.ctrl == CBOR_CTRL_TRUE; } -void cbor_set_float2(cbor_item_t *item, float value) { +void cbor_set_float2(cbor_item_t* item, float value) { CBOR_ASSERT(cbor_is_float(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_16); - *((float *)item->data) = value; + *((float*)item->data) = value; } -void 
cbor_set_float4(cbor_item_t *item, float value) { +void cbor_set_float4(cbor_item_t* item, float value) { CBOR_ASSERT(cbor_is_float(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_32); - *((float *)item->data) = value; + *((float*)item->data) = value; } -void cbor_set_float8(cbor_item_t *item, double value) { +void cbor_set_float8(cbor_item_t* item, double value) { CBOR_ASSERT(cbor_is_float(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_64); - *((double *)item->data) = value; + *((double*)item->data) = value; } -void cbor_set_ctrl(cbor_item_t *item, uint8_t value) { +void cbor_set_ctrl(cbor_item_t* item, uint8_t value) { CBOR_ASSERT(cbor_isa_float_ctrl(item)); CBOR_ASSERT(cbor_float_get_width(item) == CBOR_FLOAT_0); item->metadata.float_ctrl_metadata.ctrl = value; } -void cbor_set_bool(cbor_item_t *item, bool value) { +void cbor_set_bool(cbor_item_t* item, bool value) { CBOR_ASSERT(cbor_is_bool(item)); item->metadata.float_ctrl_metadata.ctrl = value ? CBOR_CTRL_TRUE : CBOR_CTRL_FALSE; } -cbor_item_t *cbor_new_ctrl(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_ctrl(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ @@ -106,83 +110,83 @@ cbor_item_t *cbor_new_ctrl(void) { return item; } -cbor_item_t *cbor_new_float2(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t) + 4); +cbor_item_t* cbor_new_float2(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t) + 4); _CBOR_NOTNULL(item); *item = (cbor_item_t){ .type = CBOR_TYPE_FLOAT_CTRL, - .data = (unsigned char *)item + sizeof(cbor_item_t), + .data = (unsigned char*)item + sizeof(cbor_item_t), .refcount = 1, .metadata = {.float_ctrl_metadata = {.width = CBOR_FLOAT_16}}}; return item; } -cbor_item_t *cbor_new_float4(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t) + 4); +cbor_item_t* cbor_new_float4(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t) + 
4); _CBOR_NOTNULL(item); *item = (cbor_item_t){ .type = CBOR_TYPE_FLOAT_CTRL, - .data = (unsigned char *)item + sizeof(cbor_item_t), + .data = (unsigned char*)item + sizeof(cbor_item_t), .refcount = 1, .metadata = {.float_ctrl_metadata = {.width = CBOR_FLOAT_32}}}; return item; } -cbor_item_t *cbor_new_float8(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t) + 8); +cbor_item_t* cbor_new_float8(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t) + 8); _CBOR_NOTNULL(item); *item = (cbor_item_t){ .type = CBOR_TYPE_FLOAT_CTRL, - .data = (unsigned char *)item + sizeof(cbor_item_t), + .data = (unsigned char*)item + sizeof(cbor_item_t), .refcount = 1, .metadata = {.float_ctrl_metadata = {.width = CBOR_FLOAT_64}}}; return item; } -cbor_item_t *cbor_new_null(void) { - cbor_item_t *item = cbor_new_ctrl(); +cbor_item_t* cbor_new_null(void) { + cbor_item_t* item = cbor_new_ctrl(); _CBOR_NOTNULL(item); cbor_set_ctrl(item, CBOR_CTRL_NULL); return item; } -cbor_item_t *cbor_new_undef(void) { - cbor_item_t *item = cbor_new_ctrl(); +cbor_item_t* cbor_new_undef(void) { + cbor_item_t* item = cbor_new_ctrl(); _CBOR_NOTNULL(item); cbor_set_ctrl(item, CBOR_CTRL_UNDEF); return item; } -cbor_item_t *cbor_build_bool(bool value) { +cbor_item_t* cbor_build_bool(bool value) { return cbor_build_ctrl(value ? 
CBOR_CTRL_TRUE : CBOR_CTRL_FALSE); } -cbor_item_t *cbor_build_float2(float value) { - cbor_item_t *item = cbor_new_float2(); +cbor_item_t* cbor_build_float2(float value) { + cbor_item_t* item = cbor_new_float2(); _CBOR_NOTNULL(item); cbor_set_float2(item, value); return item; } -cbor_item_t *cbor_build_float4(float value) { - cbor_item_t *item = cbor_new_float4(); +cbor_item_t* cbor_build_float4(float value) { + cbor_item_t* item = cbor_new_float4(); _CBOR_NOTNULL(item); cbor_set_float4(item, value); return item; } -cbor_item_t *cbor_build_float8(double value) { - cbor_item_t *item = cbor_new_float8(); +cbor_item_t* cbor_build_float8(double value) { + cbor_item_t* item = cbor_new_float8(); _CBOR_NOTNULL(item); cbor_set_float8(item, value); return item; } -cbor_item_t *cbor_build_ctrl(uint8_t value) { - cbor_item_t *item = cbor_new_ctrl(); +cbor_item_t* cbor_build_ctrl(uint8_t value) { + cbor_item_t* item = cbor_new_ctrl(); _CBOR_NOTNULL(item); cbor_set_ctrl(item, value); return item; diff --git a/contrib/libcbor/src/cbor/floats_ctrls.h b/contrib/libcbor/src/cbor/floats_ctrls.h index 335eab8328be..1c1d0ba5b494 100644 --- a/contrib/libcbor/src/cbor/floats_ctrls.h +++ b/contrib/libcbor/src/cbor/floats_ctrls.h @@ -27,7 +27,7 @@ extern "C" { * @return Is this a ctrl value? */ _CBOR_NODISCARD CBOR_EXPORT bool cbor_float_ctrl_is_ctrl( - const cbor_item_t *item); + const cbor_item_t* item); /** Get the float width * @@ -35,7 +35,7 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_float_ctrl_is_ctrl( * @return The width. 
*/ _CBOR_NODISCARD CBOR_EXPORT cbor_float_width -cbor_float_get_width(const cbor_item_t *item); +cbor_float_get_width(const cbor_item_t* item); /** Get a half precision float * @@ -45,7 +45,7 @@ cbor_float_get_width(const cbor_item_t *item); * @return half precision value */ _CBOR_NODISCARD CBOR_EXPORT float cbor_float_get_float2( - const cbor_item_t *item); + const cbor_item_t* item); /** Get a single precision float * @@ -55,7 +55,7 @@ _CBOR_NODISCARD CBOR_EXPORT float cbor_float_get_float2( * @return single precision value */ _CBOR_NODISCARD CBOR_EXPORT float cbor_float_get_float4( - const cbor_item_t *item); + const cbor_item_t* item); /** Get a double precision float * @@ -65,7 +65,7 @@ _CBOR_NODISCARD CBOR_EXPORT float cbor_float_get_float4( * @return double precision value */ _CBOR_NODISCARD CBOR_EXPORT double cbor_float_get_float8( - const cbor_item_t *item); + const cbor_item_t* item); /** Get the float value represented as double * @@ -75,14 +75,14 @@ _CBOR_NODISCARD CBOR_EXPORT double cbor_float_get_float8( * @return double precision value */ _CBOR_NODISCARD CBOR_EXPORT double cbor_float_get_float( - const cbor_item_t *item); + const cbor_item_t* item); /** Get value from a boolean ctrl item * * @param item A ctrl item * @return boolean value */ -_CBOR_NODISCARD CBOR_EXPORT bool cbor_get_bool(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT bool cbor_get_bool(const cbor_item_t* item); /** Constructs a new ctrl item * @@ -92,7 +92,7 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_get_bool(const cbor_item_t *item); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_ctrl(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_ctrl(void); /** Constructs a new float item * @@ -102,7 +102,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_ctrl(void); * initialized to one. 
* @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_float2(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_float2(void); /** Constructs a new float item * @@ -112,7 +112,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_float2(void); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_float4(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_float4(void); /** Constructs a new float item * @@ -122,7 +122,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_float4(void); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_float8(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_float8(void); /** Constructs new null ctrl item * @@ -130,7 +130,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_float8(void); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_null(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_null(void); /** Constructs new undef ctrl item * @@ -138,7 +138,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_null(void); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_undef(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_undef(void); /** Constructs new boolean ctrl item * @@ -147,55 +147,56 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_undef(void); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_bool(bool value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_bool(bool value); /** Assign a control value * * \rst - * .. warning:: It is possible to produce an invalid CBOR value by assigning a - * invalid value using this mechanism. Please consult the standard before use. + * .. 
warning:: + * It is possible to produce an invalid CBOR value by assigning an invalid + * value using this mechanism. Please consult the standard before use. * \endrst * * @param item A ctrl item * @param value The simple value to assign. Please consult the standard for * allowed values */ -CBOR_EXPORT void cbor_set_ctrl(cbor_item_t *item, uint8_t value); +CBOR_EXPORT void cbor_set_ctrl(cbor_item_t* item, uint8_t value); /** Assign a boolean value to a boolean ctrl item * * @param item A ctrl item * @param value The simple value to assign. */ -CBOR_EXPORT void cbor_set_bool(cbor_item_t *item, bool value); +CBOR_EXPORT void cbor_set_bool(cbor_item_t* item, bool value); /** Assigns a float value * * @param item A half precision float * @param value The value to assign */ -CBOR_EXPORT void cbor_set_float2(cbor_item_t *item, float value); +CBOR_EXPORT void cbor_set_float2(cbor_item_t* item, float value); /** Assigns a float value * * @param item A single precision float * @param value The value to assign */ -CBOR_EXPORT void cbor_set_float4(cbor_item_t *item, float value); +CBOR_EXPORT void cbor_set_float4(cbor_item_t* item, float value); /** Assigns a float value * * @param item A double precision float * @param value The value to assign */ -CBOR_EXPORT void cbor_set_float8(cbor_item_t *item, double value); +CBOR_EXPORT void cbor_set_float8(cbor_item_t* item, double value); /** Reads the control value * * @param item A ctrl item * @return the simple value */ -_CBOR_NODISCARD CBOR_EXPORT uint8_t cbor_ctrl_value(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT uint8_t cbor_ctrl_value(const cbor_item_t* item); /** Constructs a new float * @@ -204,7 +205,7 @@ _CBOR_NODISCARD CBOR_EXPORT uint8_t cbor_ctrl_value(const cbor_item_t *item); * initialized to one. 
* @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_float2(float value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_float2(float value); /** Constructs a new float * @@ -213,7 +214,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_float2(float value); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_float4(float value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_float4(float value); /** Constructs a new float * @@ -222,7 +223,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_float4(float value); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_float8(double value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_float8(double value); /** Constructs a ctrl item * @@ -231,7 +232,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_float8(double value); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_ctrl(uint8_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_ctrl(uint8_t value); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/cbor/internal/builder_callbacks.c b/contrib/libcbor/src/cbor/internal/builder_callbacks.c index 257cef3adbd1..24e7708da3e5 100644 --- a/contrib/libcbor/src/cbor/internal/builder_callbacks.c +++ b/contrib/libcbor/src/cbor/internal/builder_callbacks.c @@ -21,8 +21,8 @@ // `_cbor_builder_append` takes ownership of `item`. If adding the item to // parent container fails, `item` will be deallocated to prevent memory. 
-void _cbor_builder_append(cbor_item_t *item, - struct _cbor_decoder_context *ctx) { +void _cbor_builder_append(cbor_item_t* item, + struct _cbor_decoder_context* ctx) { if (ctx->stack->size == 0) { /* Top level item */ ctx->root = item; @@ -49,7 +49,7 @@ void _cbor_builder_append(cbor_item_t *item, cbor_decref(&item); ctx->stack->top->subitems--; if (ctx->stack->top->subitems == 0) { - cbor_item_t *stack_item = ctx->stack->top->item; + cbor_item_t* stack_item = ctx->stack->top->item; _cbor_stack_pop(ctx->stack); _cbor_builder_append(stack_item, ctx); } @@ -86,7 +86,7 @@ void _cbor_builder_append(cbor_item_t *item, CBOR_ASSERT(ctx->stack->top->subitems > 0); ctx->stack->top->subitems--; if (ctx->stack->top->subitems == 0) { - cbor_item_t *map_entry = ctx->stack->top->item; + cbor_item_t* map_entry = ctx->stack->top->item; _cbor_stack_pop(ctx->stack); _cbor_builder_append(map_entry, ctx); } @@ -100,7 +100,7 @@ void _cbor_builder_append(cbor_item_t *item, CBOR_ASSERT(ctx->stack->top->subitems == 1); cbor_tag_set_item(ctx->stack->top->item, item); cbor_decref(&item); /* Give up on our reference */ - cbor_item_t *tagged_item = ctx->stack->top->item; + cbor_item_t* tagged_item = ctx->stack->top->item; _cbor_stack_pop(ctx->stack); _cbor_builder_append(tagged_item, ctx); break; @@ -139,90 +139,90 @@ void _cbor_builder_append(cbor_item_t *item, } \ } while (0) -void cbor_builder_uint8_callback(void *context, uint8_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_int8(); +void cbor_builder_uint8_callback(void* context, uint8_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int8(); CHECK_RES(ctx, res); cbor_mark_uint(res); cbor_set_uint8(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_uint16_callback(void *context, uint16_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_int16(); +void cbor_builder_uint16_callback(void* context, uint16_t 
value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int16(); CHECK_RES(ctx, res); cbor_mark_uint(res); cbor_set_uint16(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_uint32_callback(void *context, uint32_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_int32(); +void cbor_builder_uint32_callback(void* context, uint32_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int32(); CHECK_RES(ctx, res); cbor_mark_uint(res); cbor_set_uint32(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_uint64_callback(void *context, uint64_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_int64(); +void cbor_builder_uint64_callback(void* context, uint64_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int64(); CHECK_RES(ctx, res); cbor_mark_uint(res); cbor_set_uint64(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_negint8_callback(void *context, uint8_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_int8(); +void cbor_builder_negint8_callback(void* context, uint8_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int8(); CHECK_RES(ctx, res); cbor_mark_negint(res); cbor_set_uint8(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_negint16_callback(void *context, uint16_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_int16(); +void cbor_builder_negint16_callback(void* context, uint16_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int16(); CHECK_RES(ctx, res); cbor_mark_negint(res); cbor_set_uint16(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_negint32_callback(void *context, uint32_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = 
cbor_new_int32(); +void cbor_builder_negint32_callback(void* context, uint32_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int32(); CHECK_RES(ctx, res); cbor_mark_negint(res); cbor_set_uint32(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_negint64_callback(void *context, uint64_t value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_int64(); +void cbor_builder_negint64_callback(void* context, uint64_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_int64(); CHECK_RES(ctx, res); cbor_mark_negint(res); cbor_set_uint64(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_byte_string_callback(void *context, cbor_data data, +void cbor_builder_byte_string_callback(void* context, cbor_data data, uint64_t length) { - struct _cbor_decoder_context *ctx = context; + struct _cbor_decoder_context* ctx = context; CHECK_LENGTH(ctx, length); - unsigned char *new_handle = _cbor_malloc(length); + unsigned char* new_handle = _cbor_malloc(length); if (new_handle == NULL) { ctx->creation_failed = true; return; } memcpy(new_handle, data, length); - cbor_item_t *new_chunk = cbor_new_definite_bytestring(); + cbor_item_t* new_chunk = cbor_new_definite_bytestring(); if (new_chunk == NULL) { _cbor_free(new_handle); @@ -245,26 +245,26 @@ void cbor_builder_byte_string_callback(void *context, cbor_data data, } } -void cbor_builder_byte_string_start_callback(void *context) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_indefinite_bytestring(); +void cbor_builder_byte_string_start_callback(void* context) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_indefinite_bytestring(); CHECK_RES(ctx, res); PUSH_CTX_STACK(ctx, res, 0); } -void cbor_builder_string_callback(void *context, cbor_data data, +void cbor_builder_string_callback(void* context, cbor_data data, uint64_t length) { - struct 
_cbor_decoder_context *ctx = context; + struct _cbor_decoder_context* ctx = context; CHECK_LENGTH(ctx, length); - unsigned char *new_handle = _cbor_malloc(length); + unsigned char* new_handle = _cbor_malloc(length); if (new_handle == NULL) { ctx->creation_failed = true; return; } memcpy(new_handle, data, length); - cbor_item_t *new_chunk = cbor_new_definite_string(); + cbor_item_t* new_chunk = cbor_new_definite_string(); if (new_chunk == NULL) { _cbor_free(new_handle); ctx->creation_failed = true; @@ -285,17 +285,17 @@ void cbor_builder_string_callback(void *context, cbor_data data, } } -void cbor_builder_string_start_callback(void *context) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_indefinite_string(); +void cbor_builder_string_start_callback(void* context) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_indefinite_string(); CHECK_RES(ctx, res); PUSH_CTX_STACK(ctx, res, 0); } -void cbor_builder_array_start_callback(void *context, uint64_t size) { - struct _cbor_decoder_context *ctx = context; +void cbor_builder_array_start_callback(void* context, uint64_t size) { + struct _cbor_decoder_context* ctx = context; CHECK_LENGTH(ctx, size); - cbor_item_t *res = cbor_new_definite_array(size); + cbor_item_t* res = cbor_new_definite_array(size); CHECK_RES(ctx, res); if (size > 0) { PUSH_CTX_STACK(ctx, res, size); @@ -304,24 +304,24 @@ void cbor_builder_array_start_callback(void *context, uint64_t size) { } } -void cbor_builder_indef_array_start_callback(void *context) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_indefinite_array(); +void cbor_builder_indef_array_start_callback(void* context) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_indefinite_array(); CHECK_RES(ctx, res); PUSH_CTX_STACK(ctx, res, 0); } -void cbor_builder_indef_map_start_callback(void *context) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = 
cbor_new_indefinite_map(); +void cbor_builder_indef_map_start_callback(void* context) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_indefinite_map(); CHECK_RES(ctx, res); PUSH_CTX_STACK(ctx, res, 0); } -void cbor_builder_map_start_callback(void *context, uint64_t size) { - struct _cbor_decoder_context *ctx = context; +void cbor_builder_map_start_callback(void* context, uint64_t size) { + struct _cbor_decoder_context* ctx = context; CHECK_LENGTH(ctx, size); - cbor_item_t *res = cbor_new_definite_map(size); + cbor_item_t* res = cbor_new_definite_map(size); CHECK_RES(ctx, res); if (size > 0) { PUSH_CTX_STACK(ctx, res, size * 2); @@ -333,7 +333,7 @@ void cbor_builder_map_start_callback(void *context, uint64_t size) { /** * Is the (partially constructed) item indefinite? */ -bool _cbor_is_indefinite(cbor_item_t *item) { +bool _cbor_is_indefinite(cbor_item_t* item) { switch (item->type) { case CBOR_TYPE_BYTESTRING: return cbor_bytestring_is_indefinite(item); @@ -350,11 +350,11 @@ bool _cbor_is_indefinite(cbor_item_t *item) { } } -void cbor_builder_indef_break_callback(void *context) { - struct _cbor_decoder_context *ctx = context; +void cbor_builder_indef_break_callback(void* context) { + struct _cbor_decoder_context* ctx = context; /* There must be an item to break out of*/ if (ctx->stack->size > 0) { - cbor_item_t *item = ctx->stack->top->item; + cbor_item_t* item = ctx->stack->top->item; if (_cbor_is_indefinite( item) && /* Only indefinite items can be terminated by 0xFF */ /* Special case: we cannot append up if an indefinite map is incomplete @@ -369,54 +369,54 @@ void cbor_builder_indef_break_callback(void *context) { ctx->syntax_error = true; } -void cbor_builder_float2_callback(void *context, float value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_float2(); +void cbor_builder_float2_callback(void* context, float value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = 
cbor_new_float2(); CHECK_RES(ctx, res); cbor_set_float2(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_float4_callback(void *context, float value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_float4(); +void cbor_builder_float4_callback(void* context, float value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_float4(); CHECK_RES(ctx, res); cbor_set_float4(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_float8_callback(void *context, double value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_float8(); +void cbor_builder_float8_callback(void* context, double value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_float8(); CHECK_RES(ctx, res); cbor_set_float8(res, value); _cbor_builder_append(res, ctx); } -void cbor_builder_null_callback(void *context) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_null(); +void cbor_builder_null_callback(void* context) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_null(); CHECK_RES(ctx, res); _cbor_builder_append(res, ctx); } -void cbor_builder_undefined_callback(void *context) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_undef(); +void cbor_builder_undefined_callback(void* context) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_undef(); CHECK_RES(ctx, res); _cbor_builder_append(res, ctx); } -void cbor_builder_boolean_callback(void *context, bool value) { - struct _cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_build_bool(value); +void cbor_builder_boolean_callback(void* context, bool value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_build_bool(value); CHECK_RES(ctx, res); _cbor_builder_append(res, ctx); } -void cbor_builder_tag_callback(void *context, uint64_t value) { - struct 
_cbor_decoder_context *ctx = context; - cbor_item_t *res = cbor_new_tag(value); +void cbor_builder_tag_callback(void* context, uint64_t value) { + struct _cbor_decoder_context* ctx = context; + cbor_item_t* res = cbor_new_tag(value); CHECK_RES(ctx, res); PUSH_CTX_STACK(ctx, res, 1); } diff --git a/contrib/libcbor/src/cbor/internal/builder_callbacks.h b/contrib/libcbor/src/cbor/internal/builder_callbacks.h index 7893960e4131..0e034306952b 100644 --- a/contrib/libcbor/src/cbor/internal/builder_callbacks.h +++ b/contrib/libcbor/src/cbor/internal/builder_callbacks.h @@ -22,61 +22,61 @@ struct _cbor_decoder_context { bool creation_failed; /** Stack expectation mismatch */ bool syntax_error; - cbor_item_t *root; - struct _cbor_stack *stack; + cbor_item_t* root; + struct _cbor_stack* stack; }; /** Internal helper: Append item to the top of the stack while handling errors. */ -void _cbor_builder_append(cbor_item_t *item, struct _cbor_decoder_context *ctx); +void _cbor_builder_append(cbor_item_t* item, struct _cbor_decoder_context* ctx); -void cbor_builder_uint8_callback(void *, uint8_t); +void cbor_builder_uint8_callback(void*, uint8_t); -void cbor_builder_uint16_callback(void *, uint16_t); +void cbor_builder_uint16_callback(void*, uint16_t); -void cbor_builder_uint32_callback(void *, uint32_t); +void cbor_builder_uint32_callback(void*, uint32_t); -void cbor_builder_uint64_callback(void *, uint64_t); +void cbor_builder_uint64_callback(void*, uint64_t); -void cbor_builder_negint8_callback(void *, uint8_t); +void cbor_builder_negint8_callback(void*, uint8_t); -void cbor_builder_negint16_callback(void *, uint16_t); +void cbor_builder_negint16_callback(void*, uint16_t); -void cbor_builder_negint32_callback(void *, uint32_t); +void cbor_builder_negint32_callback(void*, uint32_t); -void cbor_builder_negint64_callback(void *, uint64_t); +void cbor_builder_negint64_callback(void*, uint64_t); -void cbor_builder_string_callback(void *, cbor_data, uint64_t); +void 
cbor_builder_string_callback(void*, cbor_data, uint64_t); -void cbor_builder_string_start_callback(void *); +void cbor_builder_string_start_callback(void*); -void cbor_builder_byte_string_callback(void *, cbor_data, uint64_t); +void cbor_builder_byte_string_callback(void*, cbor_data, uint64_t); -void cbor_builder_byte_string_start_callback(void *); +void cbor_builder_byte_string_start_callback(void*); -void cbor_builder_array_start_callback(void *, uint64_t); +void cbor_builder_array_start_callback(void*, uint64_t); -void cbor_builder_indef_array_start_callback(void *); +void cbor_builder_indef_array_start_callback(void*); -void cbor_builder_map_start_callback(void *, uint64_t); +void cbor_builder_map_start_callback(void*, uint64_t); -void cbor_builder_indef_map_start_callback(void *); +void cbor_builder_indef_map_start_callback(void*); -void cbor_builder_tag_callback(void *, uint64_t); +void cbor_builder_tag_callback(void*, uint64_t); -void cbor_builder_float2_callback(void *, float); +void cbor_builder_float2_callback(void*, float); -void cbor_builder_float4_callback(void *, float); +void cbor_builder_float4_callback(void*, float); -void cbor_builder_float8_callback(void *, double); +void cbor_builder_float8_callback(void*, double); -void cbor_builder_null_callback(void *); +void cbor_builder_null_callback(void*); -void cbor_builder_undefined_callback(void *); +void cbor_builder_undefined_callback(void*); -void cbor_builder_boolean_callback(void *, bool); +void cbor_builder_boolean_callback(void*, bool); -void cbor_builder_indef_break_callback(void *); +void cbor_builder_indef_break_callback(void*); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/cbor/internal/encoders.c b/contrib/libcbor/src/cbor/internal/encoders.c index 49d4d7f33d2b..773df7970c29 100644 --- a/contrib/libcbor/src/cbor/internal/encoders.c +++ b/contrib/libcbor/src/cbor/internal/encoders.c @@ -6,9 +6,10 @@ */ #include "encoders.h" + #include <string.h> -size_t _cbor_encode_uint8(uint8_t 
value, unsigned char *buffer, +size_t _cbor_encode_uint8(uint8_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset) { if (value <= 23) { if (buffer_size >= 1) { @@ -25,43 +26,43 @@ size_t _cbor_encode_uint8(uint8_t value, unsigned char *buffer, return 0; } -size_t _cbor_encode_uint16(uint16_t value, unsigned char *buffer, +size_t _cbor_encode_uint16(uint16_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset) { - if (buffer_size >= 3) { - buffer[0] = 0x19 + offset; + if (buffer_size < 3) { + return 0; + } + buffer[0] = 0x19 + offset; #ifdef IS_BIG_ENDIAN - memcpy(buffer + 1, &value, 2); + memcpy(buffer + 1, &value, 2); #else - buffer[1] = (unsigned char)(value >> 8); - buffer[2] = (unsigned char)value; + buffer[1] = (unsigned char)(value >> 8); + buffer[2] = (unsigned char)value; #endif - return 3; - } else - return 0; + return 3; } -size_t _cbor_encode_uint32(uint32_t value, unsigned char *buffer, +size_t _cbor_encode_uint32(uint32_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset) { - if (buffer_size >= 5) { - buffer[0] = 0x1A + offset; + if (buffer_size < 5) { + return 0; + } + buffer[0] = 0x1A + offset; #ifdef IS_BIG_ENDIAN - memcpy(buffer + 1, &value, 4); + memcpy(buffer + 1, &value, 4); #else - buffer[1] = (unsigned char)(value >> 24); - buffer[2] = (unsigned char)(value >> 16); - buffer[3] = (unsigned char)(value >> 8); - buffer[4] = (unsigned char)value; + buffer[1] = (unsigned char)(value >> 24); + buffer[2] = (unsigned char)(value >> 16); + buffer[3] = (unsigned char)(value >> 8); + buffer[4] = (unsigned char)value; #endif - return 5; - } else - return 0; + return 5; } -size_t _cbor_encode_uint64(uint64_t value, unsigned char *buffer, +size_t _cbor_encode_uint64(uint64_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset) { if (buffer_size >= 9) { buffer[0] = 0x1B + offset; @@ -84,7 +85,7 @@ size_t _cbor_encode_uint64(uint64_t value, unsigned char *buffer, return 0; } -size_t 
_cbor_encode_uint(uint64_t value, unsigned char *buffer, +size_t _cbor_encode_uint(uint64_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset) { if (value <= UINT16_MAX) if (value <= UINT8_MAX) diff --git a/contrib/libcbor/src/cbor/internal/encoders.h b/contrib/libcbor/src/cbor/internal/encoders.h index 7eadb7121646..162b0bad2c13 100644 --- a/contrib/libcbor/src/cbor/internal/encoders.h +++ b/contrib/libcbor/src/cbor/internal/encoders.h @@ -15,23 +15,23 @@ extern "C" { #endif _CBOR_NODISCARD -size_t _cbor_encode_uint8(uint8_t value, unsigned char *buffer, +size_t _cbor_encode_uint8(uint8_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset); _CBOR_NODISCARD -size_t _cbor_encode_uint16(uint16_t value, unsigned char *buffer, +size_t _cbor_encode_uint16(uint16_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset); _CBOR_NODISCARD -size_t _cbor_encode_uint32(uint32_t value, unsigned char *buffer, +size_t _cbor_encode_uint32(uint32_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset); _CBOR_NODISCARD -size_t _cbor_encode_uint64(uint64_t value, unsigned char *buffer, +size_t _cbor_encode_uint64(uint64_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset); _CBOR_NODISCARD -size_t _cbor_encode_uint(uint64_t value, unsigned char *buffer, +size_t _cbor_encode_uint(uint64_t value, unsigned char* buffer, size_t buffer_size, uint8_t offset); #ifdef __cplusplus diff --git a/contrib/libcbor/src/cbor/internal/loaders.c b/contrib/libcbor/src/cbor/internal/loaders.c index cfa173de7905..b89cf72235c6 100644 --- a/contrib/libcbor/src/cbor/internal/loaders.c +++ b/contrib/libcbor/src/cbor/internal/loaders.c @@ -11,7 +11,7 @@ uint8_t _cbor_load_uint8(cbor_data source) { return (uint8_t)*source; } -uint16_t _cbor_load_uint16(const unsigned char *source) { +uint16_t _cbor_load_uint16(const unsigned char* source) { #ifdef IS_BIG_ENDIAN uint16_t result; memcpy(&result, source, 2); @@ -21,7 +21,7 @@ uint16_t 
_cbor_load_uint16(const unsigned char *source) { #endif } -uint32_t _cbor_load_uint32(const unsigned char *source) { +uint32_t _cbor_load_uint32(const unsigned char* source) { #ifdef IS_BIG_ENDIAN uint32_t result; memcpy(&result, source, 4); @@ -33,7 +33,7 @@ uint32_t _cbor_load_uint32(const unsigned char *source) { #endif } -uint64_t _cbor_load_uint64(const unsigned char *source) { +uint64_t _cbor_load_uint64(const unsigned char* source) { #ifdef IS_BIG_ENDIAN uint64_t result; memcpy(&result, source, 8); @@ -50,7 +50,9 @@ uint64_t _cbor_load_uint64(const unsigned char *source) { } /* As per https://www.rfc-editor.org/rfc/rfc8949.html#name-half-precision */ -float _cbor_decode_half(unsigned char *halfp) { +float _cbor_decode_half(unsigned char* halfp) { + // TODO: Broken if we are not on IEEE 754 + // (https://github.com/PJK/libcbor/issues/336) int half = (halfp[0] << 8) + halfp[1]; int exp = (half >> 10) & 0x1f; int mant = half & 0x3ff; @@ -66,15 +68,19 @@ float _cbor_decode_half(unsigned char *halfp) { float _cbor_load_half(cbor_data source) { /* Discard const */ - return _cbor_decode_half((unsigned char *)source); + return _cbor_decode_half((unsigned char*)source); } float _cbor_load_float(cbor_data source) { + // TODO: Broken if we are not on IEEE 754 + // (https://github.com/PJK/libcbor/issues/336) union _cbor_float_helper helper = {.as_uint = _cbor_load_uint32(source)}; return helper.as_float; } double _cbor_load_double(cbor_data source) { + // TODO: Broken if we are not on IEEE 754 + // (https://github.com/PJK/libcbor/issues/336) union _cbor_double_helper helper = {.as_uint = _cbor_load_uint64(source)}; return helper.as_double; } diff --git a/contrib/libcbor/src/cbor/internal/loaders.h b/contrib/libcbor/src/cbor/internal/loaders.h index ce37563a3d80..9e8eb68e29a2 100644 --- a/contrib/libcbor/src/cbor/internal/loaders.h +++ b/contrib/libcbor/src/cbor/internal/loaders.h @@ -16,16 +16,16 @@ extern "C" { /* Read the given uint from the given location, no 
questions asked */ _CBOR_NODISCARD -uint8_t _cbor_load_uint8(const unsigned char *source); +uint8_t _cbor_load_uint8(const unsigned char* source); _CBOR_NODISCARD -uint16_t _cbor_load_uint16(const unsigned char *source); +uint16_t _cbor_load_uint16(const unsigned char* source); _CBOR_NODISCARD -uint32_t _cbor_load_uint32(const unsigned char *source); +uint32_t _cbor_load_uint32(const unsigned char* source); _CBOR_NODISCARD -uint64_t _cbor_load_uint64(const unsigned char *source); +uint64_t _cbor_load_uint64(const unsigned char* source); _CBOR_NODISCARD float _cbor_load_half(cbor_data source); diff --git a/contrib/libcbor/src/cbor/internal/stack.c b/contrib/libcbor/src/cbor/internal/stack.c index 2db03cbbf081..00e6aed52376 100644 --- a/contrib/libcbor/src/cbor/internal/stack.c +++ b/contrib/libcbor/src/cbor/internal/stack.c @@ -11,18 +11,18 @@ struct _cbor_stack _cbor_stack_init(void) { return (struct _cbor_stack){.top = NULL, .size = 0}; } -void _cbor_stack_pop(struct _cbor_stack *stack) { - struct _cbor_stack_record *top = stack->top; +void _cbor_stack_pop(struct _cbor_stack* stack) { + struct _cbor_stack_record* top = stack->top; stack->top = stack->top->lower; _cbor_free(top); stack->size--; } -struct _cbor_stack_record *_cbor_stack_push(struct _cbor_stack *stack, - cbor_item_t *item, +struct _cbor_stack_record* _cbor_stack_push(struct _cbor_stack* stack, + cbor_item_t* item, size_t subitems) { if (stack->size == CBOR_MAX_STACK_SIZE) return NULL; - struct _cbor_stack_record *new_top = + struct _cbor_stack_record* new_top = _cbor_malloc(sizeof(struct _cbor_stack_record)); if (new_top == NULL) return NULL; diff --git a/contrib/libcbor/src/cbor/internal/stack.h b/contrib/libcbor/src/cbor/internal/stack.h index cf2206b40e58..7bc43ac09f9b 100644 --- a/contrib/libcbor/src/cbor/internal/stack.h +++ b/contrib/libcbor/src/cbor/internal/stack.h @@ -17,9 +17,9 @@ extern "C" { /** Simple stack record for the parser */ struct _cbor_stack_record { /** Pointer to the parent 
stack frame */ - struct _cbor_stack_record *lower; + struct _cbor_stack_record* lower; /** Item under construction */ - cbor_item_t *item; + cbor_item_t* item; /** * How many outstanding subitems are expected. * @@ -33,17 +33,17 @@ struct _cbor_stack_record { /** Stack handle - contents and size */ struct _cbor_stack { - struct _cbor_stack_record *top; + struct _cbor_stack_record* top; size_t size; }; _CBOR_NODISCARD struct _cbor_stack _cbor_stack_init(void); -void _cbor_stack_pop(struct _cbor_stack *); +void _cbor_stack_pop(struct _cbor_stack*); _CBOR_NODISCARD -struct _cbor_stack_record *_cbor_stack_push(struct _cbor_stack *, cbor_item_t *, +struct _cbor_stack_record* _cbor_stack_push(struct _cbor_stack*, cbor_item_t*, size_t); #ifdef __cplusplus diff --git a/contrib/libcbor/src/cbor/ints.c b/contrib/libcbor/src/cbor/ints.c index b4d035a1897e..140d688f8215 100644 --- a/contrib/libcbor/src/cbor/ints.c +++ b/contrib/libcbor/src/cbor/ints.c @@ -7,38 +7,39 @@ #include "ints.h" -cbor_int_width cbor_int_get_width(const cbor_item_t *item) { +cbor_int_width cbor_int_get_width(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); return item->metadata.int_metadata.width; } -uint8_t cbor_get_uint8(const cbor_item_t *item) { +uint8_t cbor_get_uint8(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_8); return *item->data; } -uint16_t cbor_get_uint16(const cbor_item_t *item) { +uint16_t cbor_get_uint16(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_16); - return *(uint16_t *)item->data; + return *(uint16_t*)item->data; } -uint32_t cbor_get_uint32(const cbor_item_t *item) { +uint32_t cbor_get_uint32(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_32); - return *(uint32_t *)item->data; + return *(uint32_t*)item->data; } -uint64_t cbor_get_uint64(const cbor_item_t *item) { +uint64_t 
cbor_get_uint64(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_64); - return *(uint64_t *)item->data; + return *(uint64_t*)item->data; } -uint64_t cbor_get_int(const cbor_item_t *item) { +uint64_t cbor_get_int(const cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); - // cppcheck-suppress missingReturn + CBOR_ASSERT(cbor_int_get_width(item) >= CBOR_INT_8 && + cbor_int_get_width(item) <= CBOR_INT_64); switch (cbor_int_get_width(item)) { case CBOR_INT_8: return cbor_get_uint8(item); @@ -48,141 +49,144 @@ uint64_t cbor_get_int(const cbor_item_t *item) { return cbor_get_uint32(item); case CBOR_INT_64: return cbor_get_uint64(item); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } } -void cbor_set_uint8(cbor_item_t *item, uint8_t value) { +void cbor_set_uint8(cbor_item_t* item, uint8_t value) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_8); *item->data = value; } -void cbor_set_uint16(cbor_item_t *item, uint16_t value) { +void cbor_set_uint16(cbor_item_t* item, uint16_t value) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_16); - *(uint16_t *)item->data = value; + *(uint16_t*)item->data = value; } -void cbor_set_uint32(cbor_item_t *item, uint32_t value) { +void cbor_set_uint32(cbor_item_t* item, uint32_t value) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_32); - *(uint32_t *)item->data = value; + *(uint32_t*)item->data = value; } -void cbor_set_uint64(cbor_item_t *item, uint64_t value) { +void cbor_set_uint64(cbor_item_t* item, uint64_t value) { CBOR_ASSERT(cbor_is_int(item)); CBOR_ASSERT(cbor_int_get_width(item) == CBOR_INT_64); - *(uint64_t *)item->data = value; + *(uint64_t*)item->data = value; } -void cbor_mark_uint(cbor_item_t *item) { +void cbor_mark_uint(cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); item->type = CBOR_TYPE_UINT; } -void 
cbor_mark_negint(cbor_item_t *item) { +void cbor_mark_negint(cbor_item_t* item) { CBOR_ASSERT(cbor_is_int(item)); item->type = CBOR_TYPE_NEGINT; } -cbor_item_t *cbor_new_int8(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t) + 1); +cbor_item_t* cbor_new_int8(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t) + 1); _CBOR_NOTNULL(item); - *item = (cbor_item_t){.data = (unsigned char *)item + sizeof(cbor_item_t), + *item = (cbor_item_t){.data = (unsigned char*)item + sizeof(cbor_item_t), .refcount = 1, .metadata = {.int_metadata = {.width = CBOR_INT_8}}, .type = CBOR_TYPE_UINT}; return item; } -cbor_item_t *cbor_new_int16(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t) + 2); +cbor_item_t* cbor_new_int16(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t) + 2); _CBOR_NOTNULL(item); - *item = (cbor_item_t){.data = (unsigned char *)item + sizeof(cbor_item_t), + *item = (cbor_item_t){.data = (unsigned char*)item + sizeof(cbor_item_t), .refcount = 1, .metadata = {.int_metadata = {.width = CBOR_INT_16}}, .type = CBOR_TYPE_UINT}; return item; } -cbor_item_t *cbor_new_int32(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t) + 4); +cbor_item_t* cbor_new_int32(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t) + 4); _CBOR_NOTNULL(item); - *item = (cbor_item_t){.data = (unsigned char *)item + sizeof(cbor_item_t), + *item = (cbor_item_t){.data = (unsigned char*)item + sizeof(cbor_item_t), .refcount = 1, .metadata = {.int_metadata = {.width = CBOR_INT_32}}, .type = CBOR_TYPE_UINT}; return item; } -cbor_item_t *cbor_new_int64(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t) + 8); +cbor_item_t* cbor_new_int64(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t) + 8); _CBOR_NOTNULL(item); - *item = (cbor_item_t){.data = (unsigned char *)item + sizeof(cbor_item_t), + *item = (cbor_item_t){.data = (unsigned char*)item + sizeof(cbor_item_t), .refcount = 1, .metadata = {.int_metadata = 
{.width = CBOR_INT_64}}, .type = CBOR_TYPE_UINT}; return item; } -cbor_item_t *cbor_build_uint8(uint8_t value) { - cbor_item_t *item = cbor_new_int8(); +cbor_item_t* cbor_build_uint8(uint8_t value) { + cbor_item_t* item = cbor_new_int8(); _CBOR_NOTNULL(item); cbor_set_uint8(item, value); cbor_mark_uint(item); return item; } -cbor_item_t *cbor_build_uint16(uint16_t value) { - cbor_item_t *item = cbor_new_int16(); +cbor_item_t* cbor_build_uint16(uint16_t value) { + cbor_item_t* item = cbor_new_int16(); _CBOR_NOTNULL(item); cbor_set_uint16(item, value); cbor_mark_uint(item); return item; } -cbor_item_t *cbor_build_uint32(uint32_t value) { - cbor_item_t *item = cbor_new_int32(); +cbor_item_t* cbor_build_uint32(uint32_t value) { + cbor_item_t* item = cbor_new_int32(); _CBOR_NOTNULL(item); cbor_set_uint32(item, value); cbor_mark_uint(item); return item; } -cbor_item_t *cbor_build_uint64(uint64_t value) { - cbor_item_t *item = cbor_new_int64(); +cbor_item_t* cbor_build_uint64(uint64_t value) { + cbor_item_t* item = cbor_new_int64(); _CBOR_NOTNULL(item); cbor_set_uint64(item, value); cbor_mark_uint(item); return item; } -cbor_item_t *cbor_build_negint8(uint8_t value) { - cbor_item_t *item = cbor_new_int8(); +cbor_item_t* cbor_build_negint8(uint8_t value) { + cbor_item_t* item = cbor_new_int8(); _CBOR_NOTNULL(item); cbor_set_uint8(item, value); cbor_mark_negint(item); return item; } -cbor_item_t *cbor_build_negint16(uint16_t value) { - cbor_item_t *item = cbor_new_int16(); +cbor_item_t* cbor_build_negint16(uint16_t value) { + cbor_item_t* item = cbor_new_int16(); _CBOR_NOTNULL(item); cbor_set_uint16(item, value); cbor_mark_negint(item); return item; } -cbor_item_t *cbor_build_negint32(uint32_t value) { - cbor_item_t *item = cbor_new_int32(); +cbor_item_t* cbor_build_negint32(uint32_t value) { + cbor_item_t* item = cbor_new_int32(); _CBOR_NOTNULL(item); cbor_set_uint32(item, value); cbor_mark_negint(item); return item; } -cbor_item_t *cbor_build_negint64(uint64_t value) { - 
cbor_item_t *item = cbor_new_int64(); +cbor_item_t* cbor_build_negint64(uint64_t value) { + cbor_item_t* item = cbor_new_int64(); _CBOR_NOTNULL(item); cbor_set_uint64(item, value); cbor_mark_negint(item); diff --git a/contrib/libcbor/src/cbor/ints.h b/contrib/libcbor/src/cbor/ints.h index 006aa428e0a5..30d061035fe0 100644 --- a/contrib/libcbor/src/cbor/ints.h +++ b/contrib/libcbor/src/cbor/ints.h @@ -26,35 +26,35 @@ extern "C" { * @param item positive or negative integer * @return the value */ -_CBOR_NODISCARD CBOR_EXPORT uint8_t cbor_get_uint8(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT uint8_t cbor_get_uint8(const cbor_item_t* item); /** Extracts the integer value * * @param item positive or negative integer * @return the value */ -_CBOR_NODISCARD CBOR_EXPORT uint16_t cbor_get_uint16(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT uint16_t cbor_get_uint16(const cbor_item_t* item); /** Extracts the integer value * * @param item positive or negative integer * @return the value */ -_CBOR_NODISCARD CBOR_EXPORT uint32_t cbor_get_uint32(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT uint32_t cbor_get_uint32(const cbor_item_t* item); /** Extracts the integer value * * @param item positive or negative integer * @return the value */ -_CBOR_NODISCARD CBOR_EXPORT uint64_t cbor_get_uint64(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT uint64_t cbor_get_uint64(const cbor_item_t* item); /** Extracts the integer value * * @param item positive or negative integer * @return the value, extended to `uint64_t` */ -_CBOR_NODISCARD CBOR_EXPORT uint64_t cbor_get_int(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT uint64_t cbor_get_int(const cbor_item_t* item); /** Assigns the integer value * @@ -62,7 +62,7 @@ _CBOR_NODISCARD CBOR_EXPORT uint64_t cbor_get_int(const cbor_item_t *item); * @param value the value to assign. 
For negative integer, the logical value is * `-value - 1` */ -CBOR_EXPORT void cbor_set_uint8(cbor_item_t *item, uint8_t value); +CBOR_EXPORT void cbor_set_uint8(cbor_item_t* item, uint8_t value); /** Assigns the integer value * @@ -70,7 +70,7 @@ CBOR_EXPORT void cbor_set_uint8(cbor_item_t *item, uint8_t value); * @param value the value to assign. For negative integer, the logical value is * `-value - 1` */ -CBOR_EXPORT void cbor_set_uint16(cbor_item_t *item, uint16_t value); +CBOR_EXPORT void cbor_set_uint16(cbor_item_t* item, uint16_t value); /** Assigns the integer value * @@ -78,7 +78,7 @@ CBOR_EXPORT void cbor_set_uint16(cbor_item_t *item, uint16_t value); * @param value the value to assign. For negative integer, the logical value is * `-value - 1` */ -CBOR_EXPORT void cbor_set_uint32(cbor_item_t *item, uint32_t value); +CBOR_EXPORT void cbor_set_uint32(cbor_item_t* item, uint32_t value); /** Assigns the integer value * @@ -86,7 +86,7 @@ CBOR_EXPORT void cbor_set_uint32(cbor_item_t *item, uint32_t value); * @param value the value to assign. 
For negative integer, the logical value is * `-value - 1` */ -CBOR_EXPORT void cbor_set_uint64(cbor_item_t *item, uint64_t value); +CBOR_EXPORT void cbor_set_uint64(cbor_item_t* item, uint64_t value); /** Queries the integer width * @@ -94,7 +94,7 @@ CBOR_EXPORT void cbor_set_uint64(cbor_item_t *item, uint64_t value); * @return the width */ _CBOR_NODISCARD CBOR_EXPORT cbor_int_width -cbor_int_get_width(const cbor_item_t *item); +cbor_int_get_width(const cbor_item_t* item); /** Marks the integer item as a positive integer * @@ -102,7 +102,7 @@ cbor_int_get_width(const cbor_item_t *item); * * @param item positive or negative integer item */ -CBOR_EXPORT void cbor_mark_uint(cbor_item_t *item); +CBOR_EXPORT void cbor_mark_uint(cbor_item_t* item); /** Marks the integer item as a negative integer * @@ -110,7 +110,7 @@ CBOR_EXPORT void cbor_mark_uint(cbor_item_t *item); * * @param item positive or negative integer item */ -CBOR_EXPORT void cbor_mark_negint(cbor_item_t *item); +CBOR_EXPORT void cbor_mark_negint(cbor_item_t* item); /** Allocates new integer with 1B width * @@ -119,7 +119,7 @@ CBOR_EXPORT void cbor_mark_negint(cbor_item_t *item); * @return **new** positive integer or `NULL` on memory allocation failure. The * value is not initialized */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_int8(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_int8(void); /** Allocates new integer with 2B width * @@ -128,7 +128,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_int8(void); * @return **new** positive integer or `NULL` on memory allocation failure. The * value is not initialized */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_int16(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_int16(void); /** Allocates new integer with 4B width * @@ -137,7 +137,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_int16(void); * @return **new** positive integer or `NULL` on memory allocation failure. 
The * value is not initialized */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_int32(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_int32(void); /** Allocates new integer with 8B width * @@ -146,63 +146,63 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_int32(void); * @return **new** positive integer or `NULL` on memory allocation failure. The * value is not initialized */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_int64(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_int64(void); /** Constructs a new positive integer * * @param value the value to use * @return **new** positive integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_uint8(uint8_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_uint8(uint8_t value); /** Constructs a new positive integer * * @param value the value to use * @return **new** positive integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_uint16(uint16_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_uint16(uint16_t value); /** Constructs a new positive integer * * @param value the value to use * @return **new** positive integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_uint32(uint32_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_uint32(uint32_t value); /** Constructs a new positive integer * * @param value the value to use * @return **new** positive integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_uint64(uint64_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_uint64(uint64_t value); /** Constructs a new negative integer * * @param value the value to use * @return **new** negative integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_negint8(uint8_t value); +_CBOR_NODISCARD CBOR_EXPORT 
cbor_item_t* cbor_build_negint8(uint8_t value); /** Constructs a new negative integer * * @param value the value to use * @return **new** negative integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_negint16(uint16_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_negint16(uint16_t value); /** Constructs a new negative integer * * @param value the value to use * @return **new** negative integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_negint32(uint32_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_negint32(uint32_t value); /** Constructs a new negative integer * * @param value the value to use * @return **new** negative integer or `NULL` on memory allocation failure */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_negint64(uint64_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_negint64(uint64_t value); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/cbor/maps.c b/contrib/libcbor/src/cbor/maps.c index 8a3bd6075681..7ce168a1ed7c 100644 --- a/contrib/libcbor/src/cbor/maps.c +++ b/contrib/libcbor/src/cbor/maps.c @@ -8,18 +8,18 @@ #include "maps.h" #include "internal/memory_utils.h" -size_t cbor_map_size(const cbor_item_t *item) { +size_t cbor_map_size(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_map(item)); return item->metadata.map_metadata.end_ptr; } -size_t cbor_map_allocated(const cbor_item_t *item) { +size_t cbor_map_allocated(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_map(item)); return item->metadata.map_metadata.allocated; } -cbor_item_t *cbor_new_definite_map(size_t size) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_definite_map(size_t size) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ @@ -34,8 +34,8 @@ cbor_item_t *cbor_new_definite_map(size_t size) { return item; } -cbor_item_t 
*cbor_new_indefinite_map(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_indefinite_map(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ @@ -49,12 +49,12 @@ cbor_item_t *cbor_new_indefinite_map(void) { return item; } -bool _cbor_map_add_key(cbor_item_t *item, cbor_item_t *key) { +bool _cbor_map_add_key(cbor_item_t* item, cbor_item_t* key) { CBOR_ASSERT(cbor_isa_map(item)); - struct _cbor_map_metadata *metadata = - (struct _cbor_map_metadata *)&item->metadata; + struct _cbor_map_metadata* metadata = + (struct _cbor_map_metadata*)&item->metadata; if (cbor_map_is_definite(item)) { - struct cbor_pair *data = cbor_map_handle(item); + struct cbor_pair* data = cbor_map_handle(item); if (metadata->end_ptr >= metadata->allocated) { /* Don't realloc definite preallocated map */ return false; @@ -74,7 +74,7 @@ bool _cbor_map_add_key(cbor_item_t *item, cbor_item_t *key) { ? 1 : CBOR_BUFFER_GROWTH * metadata->allocated; - unsigned char *new_data = _cbor_realloc_multiple( + unsigned char* new_data = _cbor_realloc_multiple( item->data, sizeof(struct cbor_pair), new_allocation); if (new_data == NULL) { @@ -84,7 +84,7 @@ bool _cbor_map_add_key(cbor_item_t *item, cbor_item_t *key) { item->data = new_data; metadata->allocated = new_allocation; } - struct cbor_pair *data = cbor_map_handle(item); + struct cbor_pair* data = cbor_map_handle(item); data[metadata->end_ptr].key = key; data[metadata->end_ptr++].value = NULL; } @@ -92,7 +92,7 @@ bool _cbor_map_add_key(cbor_item_t *item, cbor_item_t *key) { return true; } -bool _cbor_map_add_value(cbor_item_t *item, cbor_item_t *value) { +bool _cbor_map_add_value(cbor_item_t* item, cbor_item_t* value) { CBOR_ASSERT(cbor_isa_map(item)); cbor_incref(value); cbor_map_handle(item)[ @@ -104,22 +104,22 @@ bool _cbor_map_add_value(cbor_item_t *item, cbor_item_t *value) { } // TODO: Add a more convenient API like add(item, key, val) -bool 
cbor_map_add(cbor_item_t *item, struct cbor_pair pair) { +bool cbor_map_add(cbor_item_t* item, struct cbor_pair pair) { CBOR_ASSERT(cbor_isa_map(item)); if (!_cbor_map_add_key(item, pair.key)) return false; return _cbor_map_add_value(item, pair.value); } -bool cbor_map_is_definite(const cbor_item_t *item) { +bool cbor_map_is_definite(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_map(item)); return item->metadata.map_metadata.type == _CBOR_METADATA_DEFINITE; } -bool cbor_map_is_indefinite(const cbor_item_t *item) { +bool cbor_map_is_indefinite(const cbor_item_t* item) { return !cbor_map_is_definite(item); } -struct cbor_pair *cbor_map_handle(const cbor_item_t *item) { +struct cbor_pair* cbor_map_handle(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_map(item)); - return (struct cbor_pair *)item->data; + return (struct cbor_pair*)item->data; } diff --git a/contrib/libcbor/src/cbor/maps.h b/contrib/libcbor/src/cbor/maps.h index 5c05b542bd84..d8b5f53b9e2a 100644 --- a/contrib/libcbor/src/cbor/maps.h +++ b/contrib/libcbor/src/cbor/maps.h @@ -26,14 +26,14 @@ extern "C" { * @param item A map * @return The number of pairs */ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_map_size(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_map_size(const cbor_item_t* item); /** Get the size of the allocated storage * * @param item A map * @return Allocated storage size (as the number of #cbor_pair items) */ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_map_allocated(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_map_allocated(const cbor_item_t* item); /** Create a new definite map * @@ -42,7 +42,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_map_allocated(const cbor_item_t *item); * initialized to one. 
* @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_definite_map(size_t size); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_definite_map(size_t size); /** Create a new indefinite map * @@ -50,7 +50,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_definite_map(size_t size); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_indefinite_map(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_indefinite_map(void); /** Add a pair to the map * @@ -63,7 +63,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_indefinite_map(void); * @return `true` on success, `false` if memory allocation failed (indefinite * maps) or the preallocated storage is full (definite maps) */ -_CBOR_NODISCARD CBOR_EXPORT bool cbor_map_add(cbor_item_t *item, +_CBOR_NODISCARD CBOR_EXPORT bool cbor_map_add(cbor_item_t* item, struct cbor_pair pair); /** Add a key to the map @@ -75,8 +75,8 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_map_add(cbor_item_t *item, * @return `true` on success, `false` if either reallocation failed or the * preallocated storage is full */ -_CBOR_NODISCARD CBOR_EXPORT bool _cbor_map_add_key(cbor_item_t *item, - cbor_item_t *key); +_CBOR_NODISCARD CBOR_EXPORT bool _cbor_map_add_key(cbor_item_t* item, + cbor_item_t* key); /** Add a value to the map * @@ -87,15 +87,15 @@ _CBOR_NODISCARD CBOR_EXPORT bool _cbor_map_add_key(cbor_item_t *item, * @return `true` on success, `false` if either reallocation failed or the * preallocated storage is full */ -_CBOR_NODISCARD CBOR_EXPORT bool _cbor_map_add_value(cbor_item_t *item, - cbor_item_t *value); +_CBOR_NODISCARD CBOR_EXPORT bool _cbor_map_add_value(cbor_item_t* item, + cbor_item_t* value); /** Is this map definite? * * @param item A map * @return Is this map definite? 
*/ -_CBOR_NODISCARD CBOR_EXPORT bool cbor_map_is_definite(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT bool cbor_map_is_definite(const cbor_item_t* item); /** Is this map indefinite? * @@ -103,7 +103,7 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_map_is_definite(const cbor_item_t *item); * @return Is this map indefinite? */ _CBOR_NODISCARD CBOR_EXPORT bool cbor_map_is_indefinite( - const cbor_item_t *item); + const cbor_item_t* item); /** Get the pairs storage * @@ -111,8 +111,8 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_map_is_indefinite( * @return Array of #cbor_map_size pairs. Manipulation is possible as long as * references remain valid. */ -_CBOR_NODISCARD CBOR_EXPORT struct cbor_pair *cbor_map_handle( - const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT struct cbor_pair* cbor_map_handle( + const cbor_item_t* item); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/cbor/serialization.c b/contrib/libcbor/src/cbor/serialization.c index 40f4c531d575..b0f681c0c383 100644 --- a/contrib/libcbor/src/cbor/serialization.c +++ b/contrib/libcbor/src/cbor/serialization.c @@ -17,9 +17,9 @@ #include "encoding.h" #include "internal/memory_utils.h" -size_t cbor_serialize(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { - // cppcheck-suppress missingReturn + CBOR_ASSERT_VALID_TYPE(cbor_typeof(item)); switch (cbor_typeof(item)) { case CBOR_TYPE_UINT: return cbor_serialize_uint(item, buffer, buffer_size); @@ -37,6 +37,9 @@ size_t cbor_serialize(const cbor_item_t *item, unsigned char *buffer, return cbor_serialize_tag(item, buffer, buffer_size); case CBOR_TYPE_FLOAT_CTRL: return cbor_serialize_float_ctrl(item, buffer, buffer_size); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } } @@ -58,11 +61,13 @@ size_t _cbor_encoded_header_size(uint64_t size) { return 9; } -size_t cbor_serialized_size(const cbor_item_t *item) { - // cppcheck-suppress 
missingReturn +size_t cbor_serialized_size(const cbor_item_t* item) { + CBOR_ASSERT_VALID_TYPE(cbor_typeof(item)); switch (cbor_typeof(item)) { case CBOR_TYPE_UINT: case CBOR_TYPE_NEGINT: + CBOR_ASSERT(cbor_int_get_width(item) >= CBOR_INT_8 && + cbor_int_get_width(item) <= CBOR_INT_64); switch (cbor_int_get_width(item)) { case CBOR_INT_8: if (cbor_get_uint8(item) <= kMaxEmbeddedInt) return 1; @@ -73,6 +78,9 @@ size_t cbor_serialized_size(const cbor_item_t *item) { return 5; case CBOR_INT_64: return 9; + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } // Note: We do not _cbor_safe_signaling_add zero-length definite strings, // they would cause zeroes to propagate. All other items are at least one @@ -86,7 +94,7 @@ size_t cbor_serialized_size(const cbor_item_t *item) { cbor_bytestring_length(item)); } size_t indef_bytestring_size = 2; // Leading byte + break - cbor_item_t **chunks = cbor_bytestring_chunks_handle(item); + cbor_item_t** chunks = cbor_bytestring_chunks_handle(item); for (size_t i = 0; i < cbor_bytestring_chunk_count(item); i++) { indef_bytestring_size = _cbor_safe_signaling_add( indef_bytestring_size, cbor_serialized_size(chunks[i])); @@ -101,7 +109,7 @@ size_t cbor_serialized_size(const cbor_item_t *item) { return _cbor_safe_signaling_add(header_size, cbor_string_length(item)); } size_t indef_string_size = 2; // Leading byte + break - cbor_item_t **chunks = cbor_string_chunks_handle(item); + cbor_item_t** chunks = cbor_string_chunks_handle(item); for (size_t i = 0; i < cbor_string_chunk_count(item); i++) { indef_string_size = _cbor_safe_signaling_add( indef_string_size, cbor_serialized_size(chunks[i])); @@ -112,7 +120,7 @@ size_t cbor_serialized_size(const cbor_item_t *item) { size_t array_size = cbor_array_is_definite(item) ? 
_cbor_encoded_header_size(cbor_array_size(item)) : 2; // Leading byte + break - cbor_item_t **items = cbor_array_handle(item); + cbor_item_t** items = cbor_array_handle(item); for (size_t i = 0; i < cbor_array_size(item); i++) { array_size = _cbor_safe_signaling_add(array_size, cbor_serialized_size(items[i])); @@ -123,7 +131,7 @@ size_t cbor_serialized_size(const cbor_item_t *item) { size_t map_size = cbor_map_is_definite(item) ? _cbor_encoded_header_size(cbor_map_size(item)) : 2; // Leading byte + break - struct cbor_pair *items = cbor_map_handle(item); + struct cbor_pair* items = cbor_map_handle(item); for (size_t i = 0; i < cbor_map_size(item); i++) { map_size = _cbor_safe_signaling_add( map_size, @@ -138,6 +146,8 @@ size_t cbor_serialized_size(const cbor_item_t *item) { cbor_serialized_size(cbor_move(cbor_tag_item(item)))); } case CBOR_TYPE_FLOAT_CTRL: + CBOR_ASSERT(cbor_float_get_width(item) >= CBOR_FLOAT_0 && + cbor_float_get_width(item) <= CBOR_FLOAT_64); switch (cbor_float_get_width(item)) { case CBOR_FLOAT_0: return _cbor_encoded_header_size(cbor_ctrl_value(item)); @@ -147,12 +157,18 @@ size_t cbor_serialized_size(const cbor_item_t *item) { return 5; case CBOR_FLOAT_64: return 9; + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } } -size_t cbor_serialize_alloc(const cbor_item_t *item, unsigned char **buffer, - size_t *buffer_size) { +size_t cbor_serialize_alloc(const cbor_item_t* item, unsigned char** buffer, + size_t* buffer_size) { *buffer = NULL; size_t serialized_size = cbor_serialized_size(item); if (serialized_size == 0) { @@ -171,9 +187,11 @@ size_t cbor_serialize_alloc(const cbor_item_t *item, unsigned char **buffer, return written; } -size_t cbor_serialize_uint(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_uint(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { 
CBOR_ASSERT(cbor_isa_uint(item)); + CBOR_ASSERT(cbor_int_get_width(item) >= CBOR_INT_8 && + cbor_int_get_width(item) <= CBOR_INT_64); // cppcheck-suppress missingReturn switch (cbor_int_get_width(item)) { case CBOR_INT_8: @@ -184,12 +202,17 @@ size_t cbor_serialize_uint(const cbor_item_t *item, unsigned char *buffer, return cbor_encode_uint32(cbor_get_uint32(item), buffer, buffer_size); case CBOR_INT_64: return cbor_encode_uint64(cbor_get_uint64(item), buffer, buffer_size); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } } -size_t cbor_serialize_negint(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_negint(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { CBOR_ASSERT(cbor_isa_negint(item)); + CBOR_ASSERT(cbor_int_get_width(item) >= CBOR_INT_8 && + cbor_int_get_width(item) <= CBOR_INT_64); // cppcheck-suppress missingReturn switch (cbor_int_get_width(item)) { case CBOR_INT_8: @@ -200,10 +223,13 @@ size_t cbor_serialize_negint(const cbor_item_t *item, unsigned char *buffer, return cbor_encode_negint32(cbor_get_uint32(item), buffer, buffer_size); case CBOR_INT_64: return cbor_encode_negint64(cbor_get_uint64(item), buffer, buffer_size); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } } -size_t cbor_serialize_bytestring(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_bytestring(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { CBOR_ASSERT(cbor_isa_bytestring(item)); if (cbor_bytestring_is_definite(item)) { @@ -220,7 +246,7 @@ size_t cbor_serialize_bytestring(const cbor_item_t *item, unsigned char *buffer, size_t written = cbor_encode_indef_bytestring_start(buffer, buffer_size); if (written == 0) return 0; - cbor_item_t **chunks = cbor_bytestring_chunks_handle(item); + cbor_item_t** chunks = cbor_bytestring_chunks_handle(item); for (size_t i = 0; i < chunk_count; i++) { size_t chunk_written = 
cbor_serialize_bytestring( chunks[i], buffer + written, buffer_size - written); @@ -235,7 +261,7 @@ size_t cbor_serialize_bytestring(const cbor_item_t *item, unsigned char *buffer, } } -size_t cbor_serialize_string(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_string(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { CBOR_ASSERT(cbor_isa_string(item)); if (cbor_string_is_definite(item)) { @@ -252,7 +278,7 @@ size_t cbor_serialize_string(const cbor_item_t *item, unsigned char *buffer, size_t written = cbor_encode_indef_string_start(buffer, buffer_size); if (written == 0) return 0; - cbor_item_t **chunks = cbor_string_chunks_handle(item); + cbor_item_t** chunks = cbor_string_chunks_handle(item); for (size_t i = 0; i < chunk_count; i++) { size_t chunk_written = cbor_serialize_string(chunks[i], buffer + written, buffer_size - written); @@ -267,11 +293,11 @@ size_t cbor_serialize_string(const cbor_item_t *item, unsigned char *buffer, } } -size_t cbor_serialize_array(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_array(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { CBOR_ASSERT(cbor_isa_array(item)); size_t size = cbor_array_size(item), written = 0; - cbor_item_t **handle = cbor_array_handle(item); + cbor_item_t** handle = cbor_array_handle(item); if (cbor_array_is_definite(item)) { written = cbor_encode_array_start(size, buffer, buffer_size); } else { @@ -298,11 +324,11 @@ size_t cbor_serialize_array(const cbor_item_t *item, unsigned char *buffer, } } -size_t cbor_serialize_map(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_map(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { CBOR_ASSERT(cbor_isa_map(item)); size_t size = cbor_map_size(item), written = 0; - struct cbor_pair *handle = cbor_map_handle(item); + struct cbor_pair* handle = cbor_map_handle(item); if (cbor_map_is_definite(item)) { written = cbor_encode_map_start(size, buffer, 
buffer_size); @@ -336,7 +362,7 @@ size_t cbor_serialize_map(const cbor_item_t *item, unsigned char *buffer, } } -size_t cbor_serialize_tag(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_tag(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { CBOR_ASSERT(cbor_isa_tag(item)); size_t written = cbor_encode_tag(cbor_tag_value(item), buffer, buffer_size); @@ -348,9 +374,11 @@ size_t cbor_serialize_tag(const cbor_item_t *item, unsigned char *buffer, return written + item_written; } -size_t cbor_serialize_float_ctrl(const cbor_item_t *item, unsigned char *buffer, +size_t cbor_serialize_float_ctrl(const cbor_item_t* item, unsigned char* buffer, size_t buffer_size) { CBOR_ASSERT(cbor_isa_float_ctrl(item)); + CBOR_ASSERT(cbor_float_get_width(item) >= CBOR_FLOAT_0 && + cbor_float_get_width(item) <= CBOR_FLOAT_64); // cppcheck-suppress missingReturn switch (cbor_float_get_width(item)) { case CBOR_FLOAT_0: @@ -364,5 +392,8 @@ size_t cbor_serialize_float_ctrl(const cbor_item_t *item, unsigned char *buffer, case CBOR_FLOAT_64: return cbor_encode_double(cbor_float_get_float8(item), buffer, buffer_size); + default: // LCOV_EXCL_START + _CBOR_UNREACHABLE; + return 0; // LCOV_EXCL_STOP } } diff --git a/contrib/libcbor/src/cbor/serialization.h b/contrib/libcbor/src/cbor/serialization.h index 228ae75d6011..66cb1ef4d696 100644 --- a/contrib/libcbor/src/cbor/serialization.h +++ b/contrib/libcbor/src/cbor/serialization.h @@ -28,7 +28,7 @@ extern "C" { * @param buffer_size Size of the \p buffer * @return Length of the result. 0 on failure. */ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize(const cbor_item_t *item, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize(const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); @@ -42,7 +42,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize(const cbor_item_t *item, * `size_t`. 
*/ _CBOR_NODISCARD CBOR_EXPORT size_t -cbor_serialized_size(const cbor_item_t *item); +cbor_serialized_size(const cbor_item_t* item); /** Serialize the given item, allocating buffers as needed * @@ -51,8 +51,9 @@ cbor_serialized_size(const cbor_item_t *item); * ignore the return value. * * \rst - * .. warning:: It is the caller's responsibility to free the buffer using an - * appropriate ``free`` implementation. + * .. warning:: + * It is the caller's responsibility to free the buffer using an appropriate + * ``free`` implementation. * \endrst * * @param item A data item @@ -62,9 +63,9 @@ cbor_serialized_size(const cbor_item_t *item); * @return Length of the result in bytes * @return 0 on memory allocation failure, in which case \p buffer is `NULL`. */ -CBOR_EXPORT size_t cbor_serialize_alloc(const cbor_item_t *item, - unsigned char **buffer, - size_t *buffer_size); +CBOR_EXPORT size_t cbor_serialize_alloc(const cbor_item_t* item, + unsigned char** buffer, + size_t* buffer_size); /** Serialize an uint * @@ -74,7 +75,7 @@ CBOR_EXPORT size_t cbor_serialize_alloc(const cbor_item_t *item, * @return Length of the result * @return 0 if the \p buffer_size doesn't fit the result */ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_uint(const cbor_item_t *item, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_uint(const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); @@ -87,7 +88,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_uint(const cbor_item_t *item, * @return 0 if the \p buffer_size doesn't fit the result */ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_negint( - const cbor_item_t *item, cbor_mutable_data buffer, size_t buffer_size); + const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); /** Serialize a bytestring * @@ -99,7 +100,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_negint( * still be modified */ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_bytestring( - const cbor_item_t *item, 
cbor_mutable_data buffer, size_t buffer_size); + const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); /** Serialize a string * @@ -111,7 +112,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_bytestring( * still be modified */ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_string( - const cbor_item_t *item, cbor_mutable_data buffer, size_t buffer_size); + const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); /** Serialize an array * * @param item An array @@ -122,7 +123,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_string( * still be modified */ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_array( - const cbor_item_t *item, cbor_mutable_data buffer, size_t buffer_size); + const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); /** Serialize a map * @@ -133,7 +134,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_array( * @return 0 if the \p buffer_size doesn't fit the result. The \p buffer may * still be modified */ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_map(const cbor_item_t *item, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_map(const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); @@ -146,7 +147,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_map(const cbor_item_t *item, * @return 0 if the \p buffer_size doesn't fit the result. 
The \p buffer may * still be modified */ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_tag(const cbor_item_t *item, +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_tag(const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); @@ -159,7 +160,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_tag(const cbor_item_t *item, * @return 0 if the \p buffer_size doesn't fit the result */ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_serialize_float_ctrl( - const cbor_item_t *item, cbor_mutable_data buffer, size_t buffer_size); + const cbor_item_t* item, cbor_mutable_data buffer, size_t buffer_size); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/cbor/streaming.c b/contrib/libcbor/src/cbor/streaming.c index 4b37701143ab..426df78b7318 100644 --- a/contrib/libcbor/src/cbor/streaming.c +++ b/contrib/libcbor/src/cbor/streaming.c @@ -9,7 +9,7 @@ #include "internal/loaders.h" static bool claim_bytes(size_t required, size_t provided, - struct cbor_decoder_result *result) { + struct cbor_decoder_result* result) { if (required > (provided - result->read)) { result->required = required + result->read; result->read = 0; @@ -42,7 +42,7 @@ static bool claim_bytes(size_t required, size_t provided, struct cbor_decoder_result cbor_stream_decode( cbor_data source, size_t source_size, - const struct cbor_callbacks *callbacks, void *context) { + const struct cbor_callbacks* callbacks, void* context) { // Attempt to claim the initial MTB byte struct cbor_decoder_result result = {.status = CBOR_DECODER_FINISHED}; if (!claim_bytes(1, source_size, &result)) { @@ -592,9 +592,10 @@ struct cbor_decoder_result cbor_stream_decode( case 0xFF: /* Break */ callbacks->indef_break(context); - // Never happens, the switch statement is exhaustive on the 1B range; make - // compiler happy - default: return result; + default: // LCOV_EXCL_START + // Never happens, the switch statement is exhaustive on the 1B range + _CBOR_UNREACHABLE; + return result; // LCOV_EXCL_STOP } } diff --git 
a/contrib/libcbor/src/cbor/strings.c b/contrib/libcbor/src/cbor/strings.c index 6ae96545cfe3..4ef4fa3c9b73 100644 --- a/contrib/libcbor/src/cbor/strings.c +++ b/contrib/libcbor/src/cbor/strings.c @@ -10,18 +10,20 @@ #include "internal/memory_utils.h" #include "internal/unicode.h" -cbor_item_t *cbor_new_definite_string(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_definite_string(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ .refcount = 1, .type = CBOR_TYPE_STRING, - .metadata = {.string_metadata = {_CBOR_METADATA_DEFINITE, 0}}}; + .metadata = {.string_metadata = {.type = _CBOR_METADATA_DEFINITE, + .codepoint_count = 0, + .length = 0}}}; return item; } -cbor_item_t *cbor_new_indefinite_string(void) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_indefinite_string(void) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ .refcount = 1, @@ -30,7 +32,7 @@ cbor_item_t *cbor_new_indefinite_string(void) { .length = 0}}, .data = _cbor_malloc(sizeof(struct cbor_indefinite_string_data))}; _CBOR_DEPENDENT_NOTNULL(item, item->data); - *((struct cbor_indefinite_string_data *)item->data) = + *((struct cbor_indefinite_string_data*)item->data) = (struct cbor_indefinite_string_data){ .chunk_count = 0, .chunk_capacity = 0, @@ -39,28 +41,28 @@ cbor_item_t *cbor_new_indefinite_string(void) { return item; } -cbor_item_t *cbor_build_string(const char *val) { - cbor_item_t *item = cbor_new_definite_string(); +cbor_item_t* cbor_build_string(const char* val) { + cbor_item_t* item = cbor_new_definite_string(); _CBOR_NOTNULL(item); size_t len = strlen(val); - unsigned char *handle = _cbor_malloc(len); + unsigned char* handle = _cbor_malloc(len); _CBOR_DEPENDENT_NOTNULL(item, handle); memcpy(handle, val, len); cbor_string_set_handle(item, handle, len); return item; } -cbor_item_t *cbor_build_stringn(const 
char *val, size_t length) { - cbor_item_t *item = cbor_new_definite_string(); +cbor_item_t* cbor_build_stringn(const char* val, size_t length) { + cbor_item_t* item = cbor_new_definite_string(); _CBOR_NOTNULL(item); - unsigned char *handle = _cbor_malloc(length); + unsigned char* handle = _cbor_malloc(length); _CBOR_DEPENDENT_NOTNULL(item, handle); memcpy(handle, val, length); cbor_string_set_handle(item, handle, length); return item; } -void cbor_string_set_handle(cbor_item_t *item, +void cbor_string_set_handle(cbor_item_t* item, cbor_mutable_data CBOR_RESTRICT_POINTER data, size_t length) { CBOR_ASSERT(cbor_isa_string(item)); @@ -78,23 +80,23 @@ void cbor_string_set_handle(cbor_item_t *item, } } -cbor_item_t **cbor_string_chunks_handle(const cbor_item_t *item) { +cbor_item_t** cbor_string_chunks_handle(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_string(item)); CBOR_ASSERT(cbor_string_is_indefinite(item)); - return ((struct cbor_indefinite_string_data *)item->data)->chunks; + return ((struct cbor_indefinite_string_data*)item->data)->chunks; } -size_t cbor_string_chunk_count(const cbor_item_t *item) { +size_t cbor_string_chunk_count(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_string(item)); CBOR_ASSERT(cbor_string_is_indefinite(item)); - return ((struct cbor_indefinite_string_data *)item->data)->chunk_count; + return ((struct cbor_indefinite_string_data*)item->data)->chunk_count; } -bool cbor_string_add_chunk(cbor_item_t *item, cbor_item_t *chunk) { +bool cbor_string_add_chunk(cbor_item_t* item, cbor_item_t* chunk) { CBOR_ASSERT(cbor_isa_string(item)); CBOR_ASSERT(cbor_string_is_indefinite(item)); - struct cbor_indefinite_string_data *data = - (struct cbor_indefinite_string_data *)item->data; + struct cbor_indefinite_string_data* data = + (struct cbor_indefinite_string_data*)item->data; if (data->chunk_count == data->chunk_capacity) { if (!_cbor_safe_to_multiply(CBOR_BUFFER_GROWTH, data->chunk_capacity)) { return false; @@ -103,8 +105,8 @@ bool 
cbor_string_add_chunk(cbor_item_t *item, cbor_item_t *chunk) { size_t new_chunk_capacity = data->chunk_capacity == 0 ? 1 : CBOR_BUFFER_GROWTH * (data->chunk_capacity); - cbor_item_t **new_chunks_data = _cbor_realloc_multiple( - data->chunks, sizeof(cbor_item_t *), new_chunk_capacity); + cbor_item_t** new_chunks_data = _cbor_realloc_multiple( + data->chunks, sizeof(cbor_item_t*), new_chunk_capacity); if (new_chunks_data == NULL) { return false; @@ -117,26 +119,26 @@ bool cbor_string_add_chunk(cbor_item_t *item, cbor_item_t *chunk) { return true; } -size_t cbor_string_length(const cbor_item_t *item) { +size_t cbor_string_length(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_string(item)); return item->metadata.string_metadata.length; } -unsigned char *cbor_string_handle(const cbor_item_t *item) { +unsigned char* cbor_string_handle(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_string(item)); return item->data; } -size_t cbor_string_codepoint_count(const cbor_item_t *item) { +size_t cbor_string_codepoint_count(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_string(item)); return item->metadata.string_metadata.codepoint_count; } -bool cbor_string_is_definite(const cbor_item_t *item) { +bool cbor_string_is_definite(const cbor_item_t* item) { CBOR_ASSERT(cbor_isa_string(item)); return item->metadata.string_metadata.type == _CBOR_METADATA_DEFINITE; } -bool cbor_string_is_indefinite(const cbor_item_t *item) { +bool cbor_string_is_indefinite(const cbor_item_t* item) { return !cbor_string_is_definite(item); } diff --git a/contrib/libcbor/src/cbor/strings.h b/contrib/libcbor/src/cbor/strings.h index 3e03f81385f0..a026f4e5c95a 100644 --- a/contrib/libcbor/src/cbor/strings.h +++ b/contrib/libcbor/src/cbor/strings.h @@ -29,7 +29,7 @@ extern "C" { * @param item a definite string * @return length of the string. 
Zero if no chunk has been attached yet */ -_CBOR_NODISCARD CBOR_EXPORT size_t cbor_string_length(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT size_t cbor_string_length(const cbor_item_t* item); /** The number of codepoints in this string * @@ -40,7 +40,7 @@ _CBOR_NODISCARD CBOR_EXPORT size_t cbor_string_length(const cbor_item_t *item); * @return The number of codepoints in this string */ _CBOR_NODISCARD CBOR_EXPORT size_t -cbor_string_codepoint_count(const cbor_item_t *item); +cbor_string_codepoint_count(const cbor_item_t* item); /** Is the string definite? * @@ -48,7 +48,7 @@ cbor_string_codepoint_count(const cbor_item_t *item); * @return Is the string definite? */ _CBOR_NODISCARD CBOR_EXPORT bool cbor_string_is_definite( - const cbor_item_t *item); + const cbor_item_t* item); /** Is the string indefinite? * @@ -56,7 +56,7 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_string_is_definite( * @return Is the string indefinite? */ _CBOR_NODISCARD CBOR_EXPORT bool cbor_string_is_indefinite( - const cbor_item_t *item); + const cbor_item_t* item); /** Get the handle to the underlying string * @@ -68,7 +68,7 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_string_is_indefinite( * @return `NULL` if no data have been assigned yet. */ _CBOR_NODISCARD CBOR_EXPORT cbor_mutable_data -cbor_string_handle(const cbor_item_t *item); +cbor_string_handle(const cbor_item_t* item); /** Set the handle to the underlying string * @@ -76,9 +76,10 @@ cbor_string_handle(const cbor_item_t *item); * and invalid, `cbor_string_codepoint_count` will return 0. * * \rst - * .. warning:: Using a pointer to a stack allocated constant is a common - * mistake. Lifetime of the string will expire when it goes out of scope and - * the CBOR item will be left inconsistent. + * .. warning:: + * Using a pointer to a stack allocated constant is a common mistake. + * Lifetime of the string will expire when it goes out of scope and the CBOR + * item will be left inconsistent. 
* \endrst * * @param item A definite string @@ -87,7 +88,7 @@ cbor_string_handle(const cbor_item_t *item); * @param length Length of the data block */ CBOR_EXPORT void cbor_string_set_handle( - cbor_item_t *item, cbor_mutable_data CBOR_RESTRICT_POINTER data, + cbor_item_t* item, cbor_mutable_data CBOR_RESTRICT_POINTER data, size_t length); /** Get the handle to the array of chunks @@ -98,8 +99,8 @@ CBOR_EXPORT void cbor_string_set_handle( * @param item A indefinite string * @return array of #cbor_string_chunk_count definite strings */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t **cbor_string_chunks_handle( - const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t** cbor_string_chunks_handle( + const cbor_item_t* item); /** Get the number of chunks this string consist of * @@ -107,7 +108,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t **cbor_string_chunks_handle( * @return The chunk count. 0 for freshly created items. */ _CBOR_NODISCARD CBOR_EXPORT size_t -cbor_string_chunk_count(const cbor_item_t *item); +cbor_string_chunk_count(const cbor_item_t* item); /** Appends a chunk to the string * @@ -122,8 +123,8 @@ cbor_string_chunk_count(const cbor_item_t *item); * case, the refcount of @p `chunk` is not increased and the @p `item` is left * intact. */ -_CBOR_NODISCARD CBOR_EXPORT bool cbor_string_add_chunk(cbor_item_t *item, - cbor_item_t *chunk); +_CBOR_NODISCARD CBOR_EXPORT bool cbor_string_add_chunk(cbor_item_t* item, + cbor_item_t* chunk); /** Creates a new definite string * @@ -133,7 +134,7 @@ _CBOR_NODISCARD CBOR_EXPORT bool cbor_string_add_chunk(cbor_item_t *item, * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_definite_string(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_definite_string(void); /** Creates a new indefinite string * @@ -143,7 +144,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_definite_string(void); * initialized to one. 
* @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_indefinite_string(void); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_indefinite_string(void); /** Creates a new string and initializes it * @@ -158,7 +159,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_indefinite_string(void); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_string(const char *val); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_string(const char* val); /** Creates a new string and initializes it * @@ -173,7 +174,7 @@ _CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_string(const char *val); * initialized to one. * @return `NULL` if memory allocation fails */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_stringn(const char *val, +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_stringn(const char* val, size_t length); #ifdef __cplusplus diff --git a/contrib/libcbor/src/cbor/tags.c b/contrib/libcbor/src/cbor/tags.c index 3f3edb0b0e1d..343a1cda5603 100644 --- a/contrib/libcbor/src/cbor/tags.c +++ b/contrib/libcbor/src/cbor/tags.c @@ -7,8 +7,8 @@ #include "tags.h" -cbor_item_t *cbor_new_tag(uint64_t value) { - cbor_item_t *item = _cbor_malloc(sizeof(cbor_item_t)); +cbor_item_t* cbor_new_tag(uint64_t value) { + cbor_item_t* item = _cbor_malloc(sizeof(cbor_item_t)); _CBOR_NOTNULL(item); *item = (cbor_item_t){ @@ -20,24 +20,24 @@ cbor_item_t *cbor_new_tag(uint64_t value) { return item; } -cbor_item_t *cbor_tag_item(const cbor_item_t *item) { - CBOR_ASSERT(cbor_isa_tag(item)); - return cbor_incref(item->metadata.tag_metadata.tagged_item); +cbor_item_t* cbor_tag_item(const cbor_item_t* tag) { + CBOR_ASSERT(cbor_isa_tag(tag)); + return cbor_incref(tag->metadata.tag_metadata.tagged_item); } -uint64_t cbor_tag_value(const cbor_item_t *item) { - CBOR_ASSERT(cbor_isa_tag(item)); - return item->metadata.tag_metadata.value; +uint64_t cbor_tag_value(const 
cbor_item_t* tag) { + CBOR_ASSERT(cbor_isa_tag(tag)); + return tag->metadata.tag_metadata.value; } -void cbor_tag_set_item(cbor_item_t *item, cbor_item_t *tagged_item) { - CBOR_ASSERT(cbor_isa_tag(item)); +void cbor_tag_set_item(cbor_item_t* tag, cbor_item_t* tagged_item) { + CBOR_ASSERT(cbor_isa_tag(tag)); cbor_incref(tagged_item); - item->metadata.tag_metadata.tagged_item = tagged_item; + tag->metadata.tag_metadata.tagged_item = tagged_item; } -cbor_item_t *cbor_build_tag(uint64_t value, cbor_item_t *item) { - cbor_item_t *res = cbor_new_tag(value); +cbor_item_t* cbor_build_tag(uint64_t value, cbor_item_t* item) { + cbor_item_t* res = cbor_new_tag(value); if (res == NULL) { return NULL; } diff --git a/contrib/libcbor/src/cbor/tags.h b/contrib/libcbor/src/cbor/tags.h index a7365df10208..360cc861a53f 100644 --- a/contrib/libcbor/src/cbor/tags.h +++ b/contrib/libcbor/src/cbor/tags.h @@ -21,51 +21,55 @@ extern "C" { * ============================================================================ */ -/** Create a new tag +/** Create a new tag. * - * @param value The tag value. Please consult the tag repository - * @return Reference to the new tag item. The item's reference count is - * initialized to one. - * @return `NULL` if memory allocation fails + * @param value The tag value (number). + * @return Reference to the new tag. Its reference count is initialized to one + * and it points to a `NULL` item. + * @return `NULL` if memory allocation fails. */ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_new_tag(uint64_t value); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_new_tag(uint64_t value); -/** Get the tagged item +/** Get the tagged item (what the tag points to). * - * @param item A tag - * @return Reference to the tagged item + * @param tag A #CBOR_TYPE_TAG tag. + * @return Reference to the tagged item. * * Increases the reference count of the underlying item. The returned reference * must be released using #cbor_decref. 
*/ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_tag_item(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_tag_item(const cbor_item_t* tag); -/** Get tag value +/** Get the tag value. * - * @param item A tag - * @return The tag value. Please consult the tag repository + * @param tag A #CBOR_TYPE_TAG tag. + * @return The tag value (number). */ -_CBOR_NODISCARD CBOR_EXPORT uint64_t cbor_tag_value(const cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT uint64_t cbor_tag_value(const cbor_item_t* tag); -/** Set the tagged item +/** Assign a tag to an item. * - * @param item A tag + * @param tag A #CBOR_TYPE_TAG tag. * @param tagged_item The item to tag. Its reference count will be increased * by one. + * + * If the tag already points to an item, the pointer will be replaced, without a + * reference count change on the previous item. + * TODO: Should we release the reference automatically? */ -CBOR_EXPORT void cbor_tag_set_item(cbor_item_t *item, cbor_item_t *tagged_item); +CBOR_EXPORT void cbor_tag_set_item(cbor_item_t* tag, cbor_item_t* tagged_item); -/** Build a new tag +/** Build a new tag. * * @param item The item to tag. Its reference count will be increased by * one. - * @param value Tag value + * @param value The tag value (number). * @return Reference to the new tag item. The item's reference count is * initialized to one. - * @return `NULL` if memory allocation fails + * @return `NULL` if memory allocation fails. 
*/ -_CBOR_NODISCARD CBOR_EXPORT cbor_item_t *cbor_build_tag(uint64_t value, - cbor_item_t *item); +_CBOR_NODISCARD CBOR_EXPORT cbor_item_t* cbor_build_tag(uint64_t value, + cbor_item_t* item); #ifdef __cplusplus } diff --git a/contrib/libcbor/src/libcborConfig.cmake.in b/contrib/libcbor/src/libcborConfig.cmake.in index 565bed365e24..5c8095c9d4bf 100644 --- a/contrib/libcbor/src/libcborConfig.cmake.in +++ b/contrib/libcbor/src/libcborConfig.cmake.in @@ -2,6 +2,9 @@ set(CBOR_VERSION @CBOR_VERSION@) @PACKAGE_INIT@ +include("${CMAKE_CURRENT_LIST_DIR}/libcborTargets.cmake") + +# legacy set_and_check(CBOR_INCLUDE_DIR "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@") set_and_check(CBOR_INCLUDE_DIRS "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@") diff --git a/contrib/libcbor/test/CMakeLists.txt b/contrib/libcbor/test/CMakeLists.txt index 9721fd03a166..626266aeba90 100644 --- a/contrib/libcbor/test/CMakeLists.txt +++ b/contrib/libcbor/test/CMakeLists.txt @@ -6,26 +6,22 @@ message(STATUS "CMocka vars: ${CMOCKA_LIBRARIES} ${CMOCKA_INCLUDE_DIR}") find_library(MATH_LIBRARY m) -CHECK_INCLUDE_FILE("execinfo.h" HAS_EXECINFO) +check_include_file("execinfo.h" HAS_EXECINFO) -foreach (TEST ${TESTS}) - string(REGEX REPLACE ".*/([^/]+).c" "\\1" NAME ${TEST}) - message("Adding test ${NAME}") - add_executable(${NAME} "${NAME}.c" assertions.c stream_expectations.c test_allocator.c) - target_link_libraries(${NAME} ${CMOCKA_LIBRARIES}) - target_link_libraries(${NAME} cbor) - if(MATH_LIBRARY) - target_link_libraries(${NAME} ${MATH_LIBRARY}) - endif() - target_include_directories(${NAME} PUBLIC ${CMOCKA_INCLUDE_DIR}) - # See https://stackoverflow.com/a/10824578/499521 - ADD_TEST(ctest_build_test_${NAME} - "${CMAKE_COMMAND}" --build ${CMAKE_BINARY_DIR} --target ${NAME}) - ADD_TEST(ctest_run_${NAME} ${NAME}) - SET_TESTS_PROPERTIES(ctest_run_${NAME} - PROPERTIES DEPENDS ctest_build_test_${NAME}) - add_dependencies(coverage ${NAME}) -endforeach (TEST) +foreach(test_file ${TESTS}) + string(REGEX REPLACE ".*/([^/]+).c" 
"\\1" NAME ${test_file}) + message("Adding test ${NAME}") + add_executable(${NAME} "${NAME}.c" assertions.c stream_expectations.c + test_allocator.c) + target_link_libraries(${NAME} ${CMOCKA_LIBRARIES}) + target_link_libraries(${NAME} cbor) + if(MATH_LIBRARY) + target_link_libraries(${NAME} ${MATH_LIBRARY}) + endif() + target_include_directories(${NAME} PUBLIC ${CMOCKA_INCLUDE_DIR}) + add_test(NAME ${NAME} COMMAND ${NAME}) + add_dependencies(coverage ${NAME}) +endforeach() add_executable(cpp_linkage_test cpp_linkage_test.cpp) -target_link_libraries(cpp_linkage_test cbor)
\ No newline at end of file +target_link_libraries(cpp_linkage_test cbor) diff --git a/contrib/libcbor/test/array_encoders_test.c b/contrib/libcbor/test/array_encoders_test.c index 54a28bd94c6b..baab0d54ff57 100644 --- a/contrib/libcbor/test/array_encoders_test.c +++ b/contrib/libcbor/test/array_encoders_test.c @@ -10,27 +10,27 @@ unsigned char buffer[512]; -static void test_embedded_array_start(void **_CBOR_UNUSED(_state)) { +static void test_embedded_array_start(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_array_start(1, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x81}), 1); } -static void test_array_start(void **_CBOR_UNUSED(_state)) { +static void test_array_start(void** _state _CBOR_UNUSED) { assert_size_equal(5, cbor_encode_array_start(1000000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x9A, 0x00, 0x0F, 0x42, 0x40}), 5); } -static void test_indef_array_start(void **_CBOR_UNUSED(_state)) { +static void test_indef_array_start(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_indef_array_start(buffer, 512)); assert_size_equal(0, cbor_encode_indef_array_start(buffer, 0)); assert_memory_equal(buffer, ((unsigned char[]){0x9F}), 1); } -static void test_indef_array_encoding(void **_CBOR_UNUSED(_state)) { - cbor_item_t *array = cbor_new_indefinite_array(); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); +static void test_indef_array_encoding(void** _state _CBOR_UNUSED) { + cbor_item_t* array = cbor_new_indefinite_array(); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_true(cbor_array_push(array, one)); assert_true(cbor_array_push(array, two)); diff --git a/contrib/libcbor/test/array_test.c b/contrib/libcbor/test/array_test.c index 1a241c051f2c..20e51eea6d79 100644 --- a/contrib/libcbor/test/array_test.c +++ b/contrib/libcbor/test/array_test.c @@ -9,12 +9,12 @@ #include "cbor.h" #include "test_allocator.h" -cbor_item_t 
*arr; +cbor_item_t* arr; struct cbor_load_result res; unsigned char data1[] = {0x80, 0xFF}; -static void test_empty_array(void **_CBOR_UNUSED(_state)) { +static void test_empty_array(void** _state _CBOR_UNUSED) { arr = cbor_load(data1, 2, &res); assert_non_null(arr); assert_true(cbor_typeof(arr) == CBOR_TYPE_ARRAY); @@ -27,7 +27,7 @@ static void test_empty_array(void **_CBOR_UNUSED(_state)) { unsigned char data2[] = {0x81, 0x01, 0xFF}; -static void test_simple_array(void **_CBOR_UNUSED(_state)) { +static void test_simple_array(void** _state _CBOR_UNUSED) { arr = cbor_load(data2, 3, &res); assert_non_null(arr); assert_true(cbor_typeof(arr) == CBOR_TYPE_ARRAY); @@ -37,10 +37,10 @@ static void test_simple_array(void **_CBOR_UNUSED(_state)) { assert_size_equal(cbor_array_allocated(arr), 1); /* Check the values */ assert_uint8(cbor_array_handle(arr)[0], 1); - cbor_item_t *intermediate = cbor_array_get(arr, 0); + cbor_item_t* intermediate = cbor_array_get(arr, 0); assert_uint8(intermediate, 1); - cbor_item_t *new_val = cbor_build_uint8(10); + cbor_item_t* new_val = cbor_build_uint8(10); assert_false(cbor_array_set(arr, 1, new_val)); assert_false(cbor_array_set(arr, 3, new_val)); cbor_decref(&new_val); @@ -53,7 +53,7 @@ static void test_simple_array(void **_CBOR_UNUSED(_state)) { unsigned char data3[] = {0x82, 0x01, 0x81, 0x01, 0xFF}; -static void test_nested_arrays(void **_CBOR_UNUSED(_state)) { +static void test_nested_arrays(void** _state _CBOR_UNUSED) { arr = cbor_load(data3, 5, &res); assert_non_null(arr); assert_true(cbor_typeof(arr) == CBOR_TYPE_ARRAY); @@ -63,7 +63,7 @@ static void test_nested_arrays(void **_CBOR_UNUSED(_state)) { /* Check the values */ assert_uint8(cbor_array_handle(arr)[0], 1); - cbor_item_t *nested = cbor_array_handle(arr)[1]; + cbor_item_t* nested = cbor_array_handle(arr)[1]; assert_true(cbor_isa_array(nested)); assert_true(cbor_array_size(nested) == 1); assert_uint8(cbor_array_handle(nested)[0], 1); @@ -74,7 +74,7 @@ static void 
test_nested_arrays(void **_CBOR_UNUSED(_state)) { unsigned char test_indef_arrays_data[] = {0x9f, 0x01, 0x02, 0xFF}; -static void test_indef_arrays(void **_CBOR_UNUSED(_state)) { +static void test_indef_arrays(void** _state _CBOR_UNUSED) { arr = cbor_load(test_indef_arrays_data, 4, &res); assert_non_null(arr); assert_true(cbor_typeof(arr) == CBOR_TYPE_ARRAY); @@ -94,7 +94,7 @@ static void test_indef_arrays(void **_CBOR_UNUSED(_state)) { unsigned char test_nested_indef_arrays_data[] = {0x9f, 0x01, 0x9f, 0x02, 0xFF, 0x03, 0xFF}; -static void test_nested_indef_arrays(void **_CBOR_UNUSED(_state)) { +static void test_nested_indef_arrays(void** _state _CBOR_UNUSED) { arr = cbor_load(test_nested_indef_arrays_data, 7, &res); assert_non_null(arr); assert_true(cbor_typeof(arr) == CBOR_TYPE_ARRAY); @@ -104,7 +104,7 @@ static void test_nested_indef_arrays(void **_CBOR_UNUSED(_state)) { /* Check the values */ assert_uint8(cbor_array_handle(arr)[0], 1); - cbor_item_t *nested = cbor_array_handle(arr)[1]; + cbor_item_t* nested = cbor_array_handle(arr)[1]; assert_true(cbor_isa_array(nested)); assert_true(cbor_array_size(nested) == 1); assert_uint8(cbor_array_handle(nested)[0], 2); @@ -113,11 +113,11 @@ static void test_nested_indef_arrays(void **_CBOR_UNUSED(_state)) { assert_null(arr); } -static void test_array_replace(void **_CBOR_UNUSED(_state)) { - cbor_item_t *array = cbor_new_definite_array(2); +static void test_array_replace(void** _state _CBOR_UNUSED) { + cbor_item_t* array = cbor_new_definite_array(2); assert_size_equal(cbor_array_size(array), 0); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *three = cbor_build_uint8(3); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* three = cbor_build_uint8(3); assert_size_equal(cbor_refcount(one), 1); assert_size_equal(cbor_refcount(three), 1); @@ -147,11 +147,11 @@ static void test_array_replace(void **_CBOR_UNUSED(_state)) { cbor_decref(&array); } -static void test_array_push_overflow(void **_CBOR_UNUSED(_state)) { 
- cbor_item_t *array = cbor_new_indefinite_array(); - cbor_item_t *one = cbor_build_uint8(1); - struct _cbor_array_metadata *metadata = - (struct _cbor_array_metadata *)&array->metadata; +static void test_array_push_overflow(void** _state _CBOR_UNUSED) { + cbor_item_t* array = cbor_new_indefinite_array(); + cbor_item_t* one = cbor_build_uint8(1); + struct _cbor_array_metadata* metadata = + (struct _cbor_array_metadata*)&array->metadata; // Pretend we already have a huge block allocated metadata->allocated = SIZE_MAX; metadata->end_ptr = SIZE_MAX; @@ -165,7 +165,7 @@ static void test_array_push_overflow(void **_CBOR_UNUSED(_state)) { cbor_decref(&array); } -static void test_array_creation(void **_CBOR_UNUSED(_state)) { +static void test_array_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_definite_array(42)); }); WITH_MOCK_MALLOC({ assert_null(cbor_new_definite_array(42)); }, 2, MALLOC, MALLOC_FAIL); @@ -173,11 +173,11 @@ static void test_array_creation(void **_CBOR_UNUSED(_state)) { WITH_FAILING_MALLOC({ assert_null(cbor_new_indefinite_array()); }); } -static void test_array_push(void **_CBOR_UNUSED(_state)) { +static void test_array_push(void** _state _CBOR_UNUSED) { WITH_MOCK_MALLOC( { - cbor_item_t *array = cbor_new_indefinite_array(); - cbor_item_t *string = cbor_build_string("Hello!"); + cbor_item_t* array = cbor_new_indefinite_array(); + cbor_item_t* string = cbor_build_string("Hello!"); assert_false(cbor_array_push(array, string)); assert_size_equal(cbor_array_allocated(array), 0); @@ -191,10 +191,10 @@ static void test_array_push(void **_CBOR_UNUSED(_state)) { } static unsigned char simple_indef_array[] = {0x9F, 0x01, 0x02, 0xFF}; -static void test_indef_array_decode(void **_CBOR_UNUSED(_state)) { +static void test_indef_array_decode(void** _state _CBOR_UNUSED) { WITH_MOCK_MALLOC( { - cbor_item_t *array; + cbor_item_t* array; struct cbor_load_result res; array = cbor_load(simple_indef_array, 4, &res); diff --git 
a/contrib/libcbor/test/bad_inputs_test.c b/contrib/libcbor/test/bad_inputs_test.c index de7bdab95231..e3cf02f01680 100644 --- a/contrib/libcbor/test/bad_inputs_test.c +++ b/contrib/libcbor/test/bad_inputs_test.c @@ -11,12 +11,12 @@ /* These tests verify behavior on interesting randomly generated inputs from the * fuzzer */ -cbor_item_t *item; +cbor_item_t* item; struct cbor_load_result res; /* Map start + array with embedded length */ unsigned char data1[] = {0xA9, 0x85}; -static void test_1(void **_CBOR_UNUSED(_state)) { +static void test_1(void** _state _CBOR_UNUSED) { item = cbor_load(data1, 2, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_NOTENOUGHDATA); @@ -24,7 +24,7 @@ static void test_1(void **_CBOR_UNUSED(_state)) { } unsigned char data2[] = {0x9D}; -static void test_2(void **_CBOR_UNUSED(_state)) { +static void test_2(void** _state _CBOR_UNUSED) { item = cbor_load(data2, 1, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_MALFORMATED); @@ -32,7 +32,7 @@ static void test_2(void **_CBOR_UNUSED(_state)) { } unsigned char data3[] = {0xD6}; -static void test_3(void **_CBOR_UNUSED(_state)) { +static void test_3(void** _state _CBOR_UNUSED) { item = cbor_load(data3, 1, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_NOTENOUGHDATA); @@ -41,7 +41,7 @@ static void test_3(void **_CBOR_UNUSED(_state)) { #ifdef SANE_MALLOC unsigned char data4[] = {0xBA, 0xC1, 0xE8, 0x3E, 0xE7, 0x20, 0xA8}; -static void test_4(void **_CBOR_UNUSED(_state)) { +static void test_4(void** _state _CBOR_UNUSED) { item = cbor_load(data4, 7, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_MEMERROR); @@ -49,7 +49,7 @@ static void test_4(void **_CBOR_UNUSED(_state)) { } unsigned char data5[] = {0x9A, 0xDA, 0x3A, 0xB2, 0x7F, 0x29}; -static void test_5(void **_CBOR_UNUSED(_state)) { +static void test_5(void** _state _CBOR_UNUSED) { assert_true(res.error.code == CBOR_ERR_MEMERROR); item = cbor_load(data5, 6, &res); 
assert_null(item); @@ -59,7 +59,7 @@ static void test_5(void **_CBOR_UNUSED(_state)) { #endif unsigned char data6[] = {0x7F, 0x21, 0x4C, 0x02, 0x40}; -static void test_6(void **_CBOR_UNUSED(_state)) { +static void test_6(void** _state _CBOR_UNUSED) { item = cbor_load(data6, 5, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_SYNTAXERROR); @@ -71,7 +71,7 @@ static void test_6(void **_CBOR_UNUSED(_state)) { * works with 64b sizes */ unsigned char data7[] = {0xA2, 0x9B, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; -static void test_7(void **_CBOR_UNUSED(_state)) { +static void test_7(void** _state _CBOR_UNUSED) { item = cbor_load(data7, 16, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_MEMERROR); @@ -84,7 +84,7 @@ unsigned char data8[] = {0xA3, 0x64, 0x68, 0x61, 0x6C, 0x66, 0xFF, 0x00, 0xFA, 0x7F, 0x7F, 0xFF, 0xFF, 0x6D, 0x73, 0x69, 0x6D, 0x70, 0x6C, 0x65, 0x20, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x73, 0x83, 0xF5, 0xF4, 0xF6}; -static void test_8(void **_CBOR_UNUSED(_state)) { +static void test_8(void** _state _CBOR_UNUSED) { item = cbor_load(data8, 39, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_SYNTAXERROR); @@ -92,7 +92,7 @@ static void test_8(void **_CBOR_UNUSED(_state)) { } unsigned char data9[] = {0xBF, 0x05, 0xFF, 0x00, 0x00, 0x00, 0x10, 0x04}; -static void test_9(void **_CBOR_UNUSED(_state)) { +static void test_9(void** _state _CBOR_UNUSED) { item = cbor_load(data9, 8, &res); assert_null(item); assert_true(res.error.code == CBOR_ERR_SYNTAXERROR); diff --git a/contrib/libcbor/test/bytestring_encoders_test.c b/contrib/libcbor/test/bytestring_encoders_test.c index 8e2fbe694055..850f84645bcf 100644 --- a/contrib/libcbor/test/bytestring_encoders_test.c +++ b/contrib/libcbor/test/bytestring_encoders_test.c @@ -11,18 +11,18 @@ unsigned char buffer[512]; -static void test_embedded_bytestring_start(void **_CBOR_UNUSED(_state)) { +static void test_embedded_bytestring_start(void** 
_state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_bytestring_start(1, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x41}), 1); } -static void test_bytestring_start(void **_CBOR_UNUSED(_state)) { +static void test_bytestring_start(void** _state _CBOR_UNUSED) { assert_size_equal(5, cbor_encode_bytestring_start(1000000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x5A, 0x00, 0x0F, 0x42, 0x40}), 5); } -static void test_indef_bytestring_start(void **_CBOR_UNUSED(_state)) { +static void test_indef_bytestring_start(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_indef_bytestring_start(buffer, 0)); assert_size_equal(1, cbor_encode_indef_bytestring_start(buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x5F}), 1); diff --git a/contrib/libcbor/test/bytestring_test.c b/contrib/libcbor/test/bytestring_test.c index 08968a13e98e..5410aabdcdbc 100644 --- a/contrib/libcbor/test/bytestring_test.c +++ b/contrib/libcbor/test/bytestring_test.c @@ -9,7 +9,7 @@ #include "cbor.h" #include "test_allocator.h" -cbor_item_t *bs; +cbor_item_t* bs; struct cbor_load_result res; unsigned char data1[] = {0x40, 0xFF}; @@ -133,7 +133,7 @@ unsigned char data8[] = { 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF}; -static void test_empty_bs(void **_CBOR_UNUSED(_state)) { +static void test_empty_bs(void** _state _CBOR_UNUSED) { bs = cbor_load(data1, 2, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -144,7 +144,7 @@ static void test_empty_bs(void **_CBOR_UNUSED(_state)) { assert_null(bs); } -static void test_embedded_bs(void **_CBOR_UNUSED(_state)) { +static void test_embedded_bs(void** _state _CBOR_UNUSED) { bs = cbor_load(data2, 2, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -157,13 +157,13 @@ static void test_embedded_bs(void **_CBOR_UNUSED(_state)) { assert_null(bs); } -static void test_notenough_data(void 
**_CBOR_UNUSED(_state)) { +static void test_notenough_data(void** _state _CBOR_UNUSED) { bs = cbor_load(data3, 2, &res); assert_null(bs); assert_true(res.error.code == CBOR_ERR_NOTENOUGHDATA); } -static void test_short_bs1(void **_CBOR_UNUSED(_state)) { +static void test_short_bs1(void** _state _CBOR_UNUSED) { bs = cbor_load(data3, 4, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -176,7 +176,7 @@ static void test_short_bs1(void **_CBOR_UNUSED(_state)) { assert_null(bs); } -static void test_short_bs2(void **_CBOR_UNUSED(_state)) { +static void test_short_bs2(void** _state _CBOR_UNUSED) { bs = cbor_load(data4, 259, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -188,7 +188,7 @@ static void test_short_bs2(void **_CBOR_UNUSED(_state)) { assert_null(bs); } -static void test_half_bs(void **_CBOR_UNUSED(_state)) { +static void test_half_bs(void** _state _CBOR_UNUSED) { bs = cbor_load(data5, 259, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -200,7 +200,7 @@ static void test_half_bs(void **_CBOR_UNUSED(_state)) { assert_null(bs); } -static void test_int_bs(void **_CBOR_UNUSED(_state)) { +static void test_int_bs(void** _state _CBOR_UNUSED) { bs = cbor_load(data6, 261, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -212,7 +212,7 @@ static void test_int_bs(void **_CBOR_UNUSED(_state)) { assert_null(bs); } -static void test_long_bs(void **_CBOR_UNUSED(_state)) { +static void test_long_bs(void** _state _CBOR_UNUSED) { bs = cbor_load(data7, 265, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -226,7 +226,7 @@ static void test_long_bs(void **_CBOR_UNUSED(_state)) { unsigned char data9[] = {0x5F, 0xFF}; -static void test_zero_indef(void **_CBOR_UNUSED(_state)) { +static void test_zero_indef(void** _state _CBOR_UNUSED) { bs = cbor_load(data9, 2, &res); assert_non_null(bs); 
assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -242,7 +242,7 @@ unsigned char data10[] = {0x5F, 0x58, 0x01, 0xA1, 0xFF, 0xFF}; /* start | bstring | break| extra */ -static void test_short_indef(void **_CBOR_UNUSED(_state)) { +static void test_short_indef(void** _state _CBOR_UNUSED) { bs = cbor_load(data10, 6, &res); assert_non_null(bs); assert_true(cbor_typeof(bs) == CBOR_TYPE_BYTESTRING); @@ -265,7 +265,7 @@ unsigned char data11[] = {0x5F, 0x58, 0x01, 0xA1, 0x58, 0x01, 0xA2, 0xFF, 0xFF}; /* start | bstring | bstring | break| * extra */ -static void test_two_indef(void **_CBOR_UNUSED(_state)) { +static void test_two_indef(void** _state _CBOR_UNUSED) { bs = cbor_load(data11, 9, &res); assert_non_null(bs); assert_size_equal(1, cbor_refcount(bs)); @@ -293,23 +293,23 @@ unsigned char data12[] = {0x5F, 0x58, 0x01}; /* start | bstring - too short */ -static void test_missing_indef(void **_CBOR_UNUSED(_state)) { +static void test_missing_indef(void** _state _CBOR_UNUSED) { bs = cbor_load(data12, 3, &res); assert_true(res.error.code == CBOR_ERR_NOTENOUGHDATA); assert_null(bs); } -static void test_inline_creation(void **_CBOR_UNUSED(_state)) { +static void test_inline_creation(void** _state _CBOR_UNUSED) { bs = cbor_build_bytestring((cbor_data) "Hello!", 6); assert_memory_equal(cbor_bytestring_handle(bs), "Hello!", 6); cbor_decref(&bs); } -static void test_add_chunk_reallocation_overflow(void **_CBOR_UNUSED(_state)) { +static void test_add_chunk_reallocation_overflow(void** _state _CBOR_UNUSED) { bs = cbor_new_indefinite_bytestring(); - cbor_item_t *chunk = cbor_build_bytestring((cbor_data) "Hello!", 6); - struct cbor_indefinite_string_data *metadata = - (struct cbor_indefinite_string_data *)bs->data; + cbor_item_t* chunk = cbor_build_bytestring((cbor_data) "Hello!", 6); + struct cbor_indefinite_string_data* metadata = + (struct cbor_indefinite_string_data*)bs->data; // Pretend we already have many chunks allocated metadata->chunk_count = SIZE_MAX; 
metadata->chunk_capacity = SIZE_MAX; @@ -323,7 +323,7 @@ static void test_add_chunk_reallocation_overflow(void **_CBOR_UNUSED(_state)) { cbor_decref(&bs); } -static void test_bytestring_creation(void **_CBOR_UNUSED(_state)) { +static void test_bytestring_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_definite_bytestring()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_indefinite_bytestring()); }); @@ -337,18 +337,18 @@ static void test_bytestring_creation(void **_CBOR_UNUSED(_state)) { MALLOC_FAIL); } -static void test_bytestring_add_chunk(void **_CBOR_UNUSED(_state)) { +static void test_bytestring_add_chunk(void** _state _CBOR_UNUSED) { unsigned char bytes[] = {0, 0, 0xFF, 0xAB}; WITH_MOCK_MALLOC( { - cbor_item_t *bytestring = cbor_new_indefinite_bytestring(); - cbor_item_t *chunk = cbor_build_bytestring(bytes, 4); + cbor_item_t* bytestring = cbor_new_indefinite_bytestring(); + cbor_item_t* chunk = cbor_build_bytestring(bytes, 4); assert_false(cbor_bytestring_add_chunk(bytestring, chunk)); assert_size_equal(cbor_bytestring_chunk_count(bytestring), 0); assert_size_equal( - ((struct cbor_indefinite_string_data *)bytestring->data) + ((struct cbor_indefinite_string_data*)bytestring->data) ->chunk_capacity, 0); diff --git a/contrib/libcbor/test/callbacks_test.c b/contrib/libcbor/test/callbacks_test.c index 65c5d37f4399..1045ed1bc254 100644 --- a/contrib/libcbor/test/callbacks_test.c +++ b/contrib/libcbor/test/callbacks_test.c @@ -20,7 +20,7 @@ unsigned char data[] = { 0x88, 0x00, 0x75, 0x9C, 0xF6, 0xF7, 0xF5}; /* Exercise the default callbacks */ -static void test_default_callbacks(void** _CBOR_UNUSED(_state)) { +static void test_default_callbacks(void** _state _CBOR_UNUSED) { size_t read = 0; while (read < 79) { struct cbor_decoder_result result = @@ -31,10 +31,11 @@ static void test_default_callbacks(void** _CBOR_UNUSED(_state)) { unsigned char bytestring_data[] = {0x01, 0x02, 0x03}; static void 
test_builder_byte_string_callback_append( - void** _CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null( - _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -71,10 +72,11 @@ static void test_builder_byte_string_callback_append( } static void test_builder_byte_string_callback_append_alloc_failure( - void** _CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null( - _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -103,10 +105,11 @@ static void test_builder_byte_string_callback_append_alloc_failure( } static void test_builder_byte_string_callback_append_item_alloc_failure( - void** _CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null( - _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -137,10 +140,11 @@ static void test_builder_byte_string_callback_append_item_alloc_failure( } static void test_builder_byte_string_callback_append_parent_alloc_failure( - void** _CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null( - _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0)); + struct 
_cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_bytestring(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -171,9 +175,11 @@ static void test_builder_byte_string_callback_append_parent_alloc_failure( } unsigned char string_data[] = {0x61, 0x62, 0x63}; -static void test_builder_string_callback_append(void** _CBOR_UNUSED(_state)) { +static void test_builder_string_callback_append(void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null(_cbor_stack_push(&stack, cbor_new_indefinite_string(), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_string(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -208,9 +214,11 @@ static void test_builder_string_callback_append(void** _CBOR_UNUSED(_state)) { } static void test_builder_string_callback_append_alloc_failure( - void** _CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null(_cbor_stack_push(&stack, cbor_new_indefinite_string(), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_string(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -239,9 +247,11 @@ static void test_builder_string_callback_append_alloc_failure( } static void test_builder_string_callback_append_item_alloc_failure( - void** _CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null(_cbor_stack_push(&stack, cbor_new_indefinite_string(), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_string(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, 
.syntax_error = false, @@ -271,9 +281,11 @@ static void test_builder_string_callback_append_item_alloc_failure( } static void test_builder_string_callback_append_parent_alloc_failure( - void** _CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null(_cbor_stack_push(&stack, cbor_new_indefinite_string(), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_string(), 0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -302,9 +314,11 @@ static void test_builder_string_callback_append_parent_alloc_failure( _cbor_stack_pop(&stack); } -static void test_append_array_failure(void** _CBOR_UNUSED(_state)) { +static void test_append_array_failure(void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null(_cbor_stack_push(&stack, cbor_new_definite_array(0), 0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_definite_array(0), 0); + assert_non_null(stack_top); stack.top->subitems = 1; struct _cbor_decoder_context context = { .creation_failed = false, @@ -331,10 +345,11 @@ static void test_append_array_failure(void** _CBOR_UNUSED(_state)) { _cbor_stack_pop(&stack); } -static void test_append_map_failure(void** _CBOR_UNUSED(_state)) { +static void test_append_map_failure(void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null( - _cbor_stack_push(&stack, cbor_new_indefinite_map(), /*subitems=*/0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_indefinite_map(), /*subitems=*/0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, @@ -362,7 +377,7 @@ static void test_append_map_failure(void** _CBOR_UNUSED(_state)) { // Size 1 array start, but we get an indef break unsigned char invalid_indef_break_data[] = 
{0x81, 0xFF}; -static void test_invalid_indef_break(void** _CBOR_UNUSED(_state)) { +static void test_invalid_indef_break(void** _state _CBOR_UNUSED) { struct cbor_load_result res; cbor_item_t* item = cbor_load(invalid_indef_break_data, 2, &res); @@ -371,9 +386,11 @@ static void test_invalid_indef_break(void** _CBOR_UNUSED(_state)) { assert_true(res.error.code == CBOR_ERR_SYNTAXERROR); } -static void test_invalid_state_indef_break(void** _CBOR_UNUSED(_state)) { +static void test_invalid_state_indef_break(void** _state _CBOR_UNUSED) { struct _cbor_stack stack = _cbor_stack_init(); - assert_non_null(_cbor_stack_push(&stack, cbor_new_int8(), /*subitems=*/0)); + struct _cbor_stack_record* stack_top = + _cbor_stack_push(&stack, cbor_new_int8(), /*subitems=*/0); + assert_non_null(stack_top); struct _cbor_decoder_context context = { .creation_failed = false, .syntax_error = false, diff --git a/contrib/libcbor/test/cbor_serialize_test.c b/contrib/libcbor/test/cbor_serialize_test.c index a2907b149345..5f8cf9f611da 100644 --- a/contrib/libcbor/test/cbor_serialize_test.c +++ b/contrib/libcbor/test/cbor_serialize_test.c @@ -6,7 +6,6 @@ */ // cbor_serialize_alloc -#pragma clang diagnostic ignored "-Wdeprecated-declarations" #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #include <math.h> @@ -24,8 +23,8 @@ unsigned char buffer[512]; -static void test_serialize_uint8_embed(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int8(); +static void test_serialize_uint8_embed(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int8(); cbor_set_uint8(item, 0); assert_size_equal(1, cbor_serialize(item, buffer, 512)); assert_memory_equal(buffer, (unsigned char[]){0x00}, 1); @@ -33,8 +32,8 @@ static void test_serialize_uint8_embed(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_uint8(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int8(); +static void test_serialize_uint8(void** _state _CBOR_UNUSED) { + 
cbor_item_t* item = cbor_new_int8(); cbor_set_uint8(item, 42); assert_size_equal(2, cbor_serialize(item, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x18, 0x2a}), 2); @@ -42,8 +41,8 @@ static void test_serialize_uint8(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_uint16(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int16(); +static void test_serialize_uint16(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int16(); cbor_set_uint16(item, 1000); assert_size_equal(3, cbor_serialize(item, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x19, 0x03, 0xE8}), 3); @@ -51,8 +50,8 @@ static void test_serialize_uint16(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_uint32(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int32(); +static void test_serialize_uint32(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int32(); cbor_set_uint32(item, 1000000); assert_size_equal(5, cbor_serialize(item, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x1A, 0x00, 0x0F, 0x42, 0x40}), @@ -61,8 +60,8 @@ static void test_serialize_uint32(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_uint64(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int64(); +static void test_serialize_uint64(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int64(); cbor_set_uint64(item, 1000000000000); assert_size_equal(9, cbor_serialize(item, buffer, 512)); assert_memory_equal( @@ -73,8 +72,8 @@ static void test_serialize_uint64(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_negint8_embed(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int8(); +static void test_serialize_negint8_embed(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int8(); cbor_set_uint8(item, 0); cbor_mark_negint(item); assert_size_equal(1, cbor_serialize(item, buffer, 
512)); @@ -83,8 +82,8 @@ static void test_serialize_negint8_embed(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_negint8(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int8(); +static void test_serialize_negint8(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int8(); cbor_set_uint8(item, 42); cbor_mark_negint(item); assert_size_equal(2, cbor_serialize(item, buffer, 512)); @@ -93,8 +92,8 @@ static void test_serialize_negint8(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_negint16(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int16(); +static void test_serialize_negint16(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int16(); cbor_set_uint16(item, 1000); cbor_mark_negint(item); assert_size_equal(3, cbor_serialize(item, buffer, 512)); @@ -103,8 +102,8 @@ static void test_serialize_negint16(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_negint32(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int32(); +static void test_serialize_negint32(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int32(); cbor_set_uint32(item, 1000000); cbor_mark_negint(item); assert_size_equal(5, cbor_serialize(item, buffer, 512)); @@ -114,8 +113,8 @@ static void test_serialize_negint32(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_negint64(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_int64(); +static void test_serialize_negint64(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_int64(); cbor_set_uint64(item, 1000000000000); cbor_mark_negint(item); assert_size_equal(9, cbor_serialize(item, buffer, 512)); @@ -127,9 +126,9 @@ static void test_serialize_negint64(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_definite_bytestring(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_bytestring(); - 
unsigned char *data = malloc(256); +static void test_serialize_definite_bytestring(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_bytestring(); + unsigned char* data = malloc(256); cbor_bytestring_set_handle(item, data, 256); memset(data, 0, 256); /* Prevent undefined behavior in comparison */ assert_size_equal(256 + 3, cbor_serialize(item, buffer, 512)); @@ -139,11 +138,11 @@ static void test_serialize_definite_bytestring(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_indefinite_bytestring(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_bytestring(); +static void test_serialize_indefinite_bytestring(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_bytestring(); - cbor_item_t *chunk = cbor_new_definite_bytestring(); - unsigned char *data = malloc(256); + cbor_item_t* chunk = cbor_new_definite_bytestring(); + unsigned char* data = malloc(256); memset(data, 0, 256); /* Prevent undefined behavior in comparison */ cbor_bytestring_set_handle(chunk, data, 256); @@ -159,11 +158,11 @@ static void test_serialize_indefinite_bytestring(void **_CBOR_UNUSED(_state)) { } static void test_serialize_bytestring_size_overflow( - void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_bytestring(); + void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_bytestring(); // Fake having a huge chunk of data - unsigned char *data = malloc(1); + unsigned char* data = malloc(1); cbor_bytestring_set_handle(item, data, SIZE_MAX); // Would require 1 + 8 + SIZE_MAX bytes, which overflows size_t @@ -172,9 +171,9 @@ static void test_serialize_bytestring_size_overflow( cbor_decref(&item); } -static void test_serialize_bytestring_no_space(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_bytestring(); - unsigned char *data = malloc(12); +static void test_serialize_bytestring_no_space(void** _state _CBOR_UNUSED) { + cbor_item_t* item = 
cbor_new_definite_bytestring(); + unsigned char* data = malloc(12); cbor_bytestring_set_handle(item, data, 12); assert_size_equal(cbor_serialize(item, buffer, 1), 0); @@ -183,10 +182,10 @@ static void test_serialize_bytestring_no_space(void **_CBOR_UNUSED(_state)) { } static void test_serialize_indefinite_bytestring_no_space( - void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_bytestring(); - cbor_item_t *chunk = cbor_new_definite_bytestring(); - unsigned char *data = malloc(256); + void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_bytestring(); + cbor_item_t* chunk = cbor_new_definite_bytestring(); + unsigned char* data = malloc(256); cbor_bytestring_set_handle(chunk, data, 256); assert_true(cbor_bytestring_add_chunk(item, cbor_move(chunk))); @@ -203,10 +202,10 @@ static void test_serialize_indefinite_bytestring_no_space( cbor_decref(&item); } -static void test_serialize_definite_string(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_string(); - unsigned char *data = malloc(12); - strncpy((char *)data, "Hello world!", 12); +static void test_serialize_definite_string(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_string(); + unsigned char* data = malloc(12); + strncpy((char*)data, "Hello world!", 12); cbor_string_set_handle(item, data, 12); assert_size_equal(1 + 12, cbor_serialize(item, buffer, 512)); assert_memory_equal( @@ -219,11 +218,11 @@ static void test_serialize_definite_string(void **_CBOR_UNUSED(_state)) { } static void test_serialize_definite_string_4b_header( - void **_CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { #if SIZE_MAX > UINT16_MAX - cbor_item_t *item = cbor_new_definite_string(); + cbor_item_t* item = cbor_new_definite_string(); const size_t size = (size_t)UINT16_MAX + 1; - unsigned char *data = malloc(size); + unsigned char* data = malloc(size); memset(data, 0, size); cbor_string_set_handle(item, data, size); 
assert_size_equal(cbor_serialized_size(item), 1 + 4 + size); @@ -232,11 +231,11 @@ static void test_serialize_definite_string_4b_header( } static void test_serialize_definite_string_8b_header( - void **_CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { #if SIZE_MAX > UINT32_MAX - cbor_item_t *item = cbor_new_definite_string(); + cbor_item_t* item = cbor_new_definite_string(); const size_t size = (size_t)UINT32_MAX + 1; - unsigned char *data = malloc(1); + unsigned char* data = malloc(1); data[0] = '\0'; cbor_string_set_handle(item, data, 1); // Pretend that we have a big item to avoid the huge malloc @@ -246,12 +245,12 @@ static void test_serialize_definite_string_8b_header( #endif } -static void test_serialize_indefinite_string(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_string(); - cbor_item_t *chunk = cbor_new_definite_string(); +static void test_serialize_indefinite_string(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_string(); + cbor_item_t* chunk = cbor_new_definite_string(); - unsigned char *data = malloc(12); - strncpy((char *)data, "Hello world!", 12); + unsigned char* data = malloc(12); + strncpy((char*)data, "Hello world!", 12); cbor_string_set_handle(chunk, data, 12); assert_true(cbor_string_add_chunk(item, cbor_move(chunk))); @@ -267,9 +266,9 @@ static void test_serialize_indefinite_string(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_string_no_space(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_string(); - unsigned char *data = malloc(12); +static void test_serialize_string_no_space(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_string(); + unsigned char* data = malloc(12); memset(data, 0, 12); cbor_string_set_handle(item, data, 12); @@ -279,10 +278,10 @@ static void test_serialize_string_no_space(void **_CBOR_UNUSED(_state)) { } static void test_serialize_indefinite_string_no_space( - void 
**_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_string(); - cbor_item_t *chunk = cbor_new_definite_string(); - unsigned char *data = malloc(256); + void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_string(); + cbor_item_t* chunk = cbor_new_definite_string(); + unsigned char* data = malloc(256); memset(data, 0, 256); cbor_string_set_handle(chunk, data, 256); assert_true(cbor_string_add_chunk(item, cbor_move(chunk))); @@ -300,10 +299,10 @@ static void test_serialize_indefinite_string_no_space( cbor_decref(&item); } -static void test_serialize_definite_array(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_array(2); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); +static void test_serialize_definite_array(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_array(2); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_true(cbor_array_push(item, one)); assert_true(cbor_array_set(item, 1, two)); @@ -317,9 +316,9 @@ static void test_serialize_definite_array(void **_CBOR_UNUSED(_state)) { cbor_decref(&two); } -static void test_serialize_array_no_space(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_array(); - cbor_item_t *one = cbor_build_uint8(1); +static void test_serialize_array_no_space(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_array(); + cbor_item_t* one = cbor_build_uint8(1); assert_true(cbor_array_push(item, one)); assert_size_equal(cbor_serialized_size(item), 3); @@ -336,10 +335,10 @@ static void test_serialize_array_no_space(void **_CBOR_UNUSED(_state)) { cbor_decref(&one); } -static void test_serialize_indefinite_array(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_array(); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); +static void test_serialize_indefinite_array(void** _state _CBOR_UNUSED) { 
+ cbor_item_t* item = cbor_new_indefinite_array(); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_true(cbor_array_push(item, one)); assert_true(cbor_array_push(item, two)); @@ -352,10 +351,10 @@ static void test_serialize_indefinite_array(void **_CBOR_UNUSED(_state)) { cbor_decref(&two); } -static void test_serialize_definite_map(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_map(2); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); +static void test_serialize_definite_map(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_map(2); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_true(cbor_map_add(item, (struct cbor_pair){.key = one, .value = two})); assert_true(cbor_map_add(item, (struct cbor_pair){.key = two, .value = one})); @@ -369,10 +368,10 @@ static void test_serialize_definite_map(void **_CBOR_UNUSED(_state)) { cbor_decref(&two); } -static void test_serialize_indefinite_map(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_map(); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); +static void test_serialize_indefinite_map(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_map(); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_true(cbor_map_add(item, (struct cbor_pair){.key = one, .value = two})); assert_true(cbor_map_add(item, (struct cbor_pair){.key = two, .value = one})); @@ -386,10 +385,10 @@ static void test_serialize_indefinite_map(void **_CBOR_UNUSED(_state)) { cbor_decref(&two); } -static void test_serialize_map_no_space(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_map(); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); +static void test_serialize_map_no_space(void** _state _CBOR_UNUSED) { + cbor_item_t* item = 
cbor_new_indefinite_map(); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_true(cbor_map_add(item, (struct cbor_pair){.key = one, .value = two})); assert_size_equal(cbor_serialized_size(item), 4); @@ -410,9 +409,9 @@ static void test_serialize_map_no_space(void **_CBOR_UNUSED(_state)) { cbor_decref(&two); } -static void test_serialize_tags(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_tag(21); - cbor_item_t *one = cbor_build_uint8(1); +static void test_serialize_tags(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_tag(21); + cbor_item_t* one = cbor_build_uint8(1); cbor_tag_set_item(item, one); assert_size_equal(2, cbor_serialize(item, buffer, 512)); @@ -422,9 +421,9 @@ static void test_serialize_tags(void **_CBOR_UNUSED(_state)) { cbor_decref(&one); } -static void test_serialize_tags_no_space(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_tag(21); - cbor_item_t *one = cbor_build_uint8(1); +static void test_serialize_tags_no_space(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_tag(21); + cbor_item_t* one = cbor_build_uint8(1); cbor_tag_set_item(item, one); assert_size_equal(cbor_serialized_size(item), 2); @@ -438,8 +437,8 @@ static void test_serialize_tags_no_space(void **_CBOR_UNUSED(_state)) { cbor_decref(&one); } -static void test_serialize_half(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_float2(); +static void test_serialize_half(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_float2(); cbor_set_float2(item, NAN); assert_size_equal(3, cbor_serialize(item, buffer, 512)); @@ -448,8 +447,8 @@ static void test_serialize_half(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_single(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_float4(); +static void test_serialize_single(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_float4(); cbor_set_float4(item, 100000.0f); 
assert_size_equal(5, cbor_serialize(item, buffer, 512)); @@ -459,8 +458,8 @@ static void test_serialize_single(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_double(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_float8(); +static void test_serialize_double(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_float8(); cbor_set_float8(item, -4.1); assert_size_equal(9, cbor_serialize(item, buffer, 512)); @@ -472,8 +471,8 @@ static void test_serialize_double(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_ctrl(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_undef(); +static void test_serialize_ctrl(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_undef(); assert_size_equal(1, cbor_serialize(item, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF7}), 1); @@ -481,8 +480,8 @@ static void test_serialize_ctrl(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_serialize_long_ctrl(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_ctrl(); +static void test_serialize_long_ctrl(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_ctrl(); cbor_set_ctrl(item, 254); assert_size_equal(2, cbor_serialize(item, buffer, 512)); @@ -491,13 +490,13 @@ static void test_serialize_long_ctrl(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_auto_serialize(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_array(4); +static void test_auto_serialize(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_array(4); for (size_t i = 0; i < 4; i++) { assert_true(cbor_array_push(item, cbor_move(cbor_build_uint64(0)))); } - unsigned char *output; + unsigned char* output; size_t output_size; assert_size_equal(cbor_serialize_alloc(item, &output, &output_size), 37); assert_size_equal(output_size, 37); @@ -507,10 +506,10 @@ static void test_auto_serialize(void 
**_CBOR_UNUSED(_state)) { _cbor_free(output); } -static void test_auto_serialize_no_size(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_build_uint8(1); +static void test_auto_serialize_no_size(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_build_uint8(1); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 1); assert_memory_equal(output, ((unsigned char[]){0x01}), 1); assert_size_equal(cbor_serialized_size(item), 1); @@ -518,16 +517,16 @@ static void test_auto_serialize_no_size(void **_CBOR_UNUSED(_state)) { _cbor_free(output); } -static void test_auto_serialize_too_large(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_string(); - cbor_item_t *chunk = cbor_new_definite_string(); +static void test_auto_serialize_too_large(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_string(); + cbor_item_t* chunk = cbor_new_definite_string(); assert_true(cbor_string_add_chunk(item, chunk)); // Pretend the chunk is huge chunk->metadata.string_metadata.length = SIZE_MAX; assert_true(SIZE_MAX + 2 == 1); assert_size_equal(cbor_serialized_size(item), 0); - unsigned char *output; + unsigned char* output; size_t output_size; assert_size_equal(cbor_serialize_alloc(item, &output, &output_size), 0); assert_size_equal(output_size, 0); @@ -538,11 +537,11 @@ static void test_auto_serialize_too_large(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_auto_serialize_alloc_fail(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_build_uint8(42); +static void test_auto_serialize_alloc_fail(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_build_uint8(42); WITH_FAILING_MALLOC({ - unsigned char *output; + unsigned char* output; size_t output_size; assert_size_equal(cbor_serialize_alloc(item, &output, &output_size), 0); assert_size_equal(output_size, 0); @@ -553,10 +552,10 @@ static void test_auto_serialize_alloc_fail(void 
**_CBOR_UNUSED(_state)) { } static void test_auto_serialize_zero_len_bytestring( - void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_build_bytestring((cbor_data) "", 0); + void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_build_bytestring((cbor_data) "", 0); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 1); assert_memory_equal(output, ((unsigned char[]){0x40}), 1); assert_size_equal(cbor_serialized_size(item), 1); @@ -564,10 +563,10 @@ static void test_auto_serialize_zero_len_bytestring( _cbor_free(output); } -static void test_auto_serialize_zero_len_string(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_build_string(""); +static void test_auto_serialize_zero_len_string(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_build_string(""); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 1); assert_memory_equal(output, ((unsigned char[]){0x60}), 1); assert_size_equal(cbor_serialized_size(item), 1); @@ -576,13 +575,13 @@ static void test_auto_serialize_zero_len_string(void **_CBOR_UNUSED(_state)) { } static void test_auto_serialize_zero_len_bytestring_chunk( - void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_bytestring(); + void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_bytestring(); assert_true(cbor_bytestring_add_chunk( item, cbor_move(cbor_build_bytestring((cbor_data) "", 0)))); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 3); assert_memory_equal(output, ((unsigned char[]){0x5f, 0x40, 0xff}), 3); assert_size_equal(cbor_serialized_size(item), 3); @@ -591,12 +590,12 @@ static void test_auto_serialize_zero_len_bytestring_chunk( } static void test_auto_serialize_zero_len_string_chunk( - void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_string(); + void** _state _CBOR_UNUSED) { + 
cbor_item_t* item = cbor_new_indefinite_string(); assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("")))); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 3); assert_memory_equal(output, ((unsigned char[]){0x7f, 0x60, 0xff}), 3); assert_size_equal(cbor_serialized_size(item), 3); @@ -604,10 +603,10 @@ static void test_auto_serialize_zero_len_string_chunk( _cbor_free(output); } -static void test_auto_serialize_zero_len_array(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_array(0); +static void test_auto_serialize_zero_len_array(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_array(0); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 1); assert_memory_equal(output, ((unsigned char[]){0x80}), 1); assert_size_equal(cbor_serialized_size(item), 1); @@ -616,10 +615,10 @@ static void test_auto_serialize_zero_len_array(void **_CBOR_UNUSED(_state)) { } static void test_auto_serialize_zero_len_indef_array( - void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_array(); + void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_array(); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 2); assert_memory_equal(output, ((unsigned char[]){0x9f, 0xff}), 2); assert_size_equal(cbor_serialized_size(item), 2); @@ -627,10 +626,10 @@ static void test_auto_serialize_zero_len_indef_array( _cbor_free(output); } -static void test_auto_serialize_zero_len_map(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_map(0); +static void test_auto_serialize_zero_len_map(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_map(0); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 1); assert_memory_equal(output, ((unsigned 
char[]){0xa0}), 1); assert_size_equal(cbor_serialized_size(item), 1); @@ -638,11 +637,10 @@ static void test_auto_serialize_zero_len_map(void **_CBOR_UNUSED(_state)) { _cbor_free(output); } -static void test_auto_serialize_zero_len_indef_map( - void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_map(); +static void test_auto_serialize_zero_len_indef_map(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_map(); - unsigned char *output; + unsigned char* output; assert_size_equal(cbor_serialize_alloc(item, &output, NULL), 2); assert_memory_equal(output, ((unsigned char[]){0xbf, 0xff}), 2); assert_size_equal(cbor_serialized_size(item), 2); diff --git a/contrib/libcbor/test/cbor_stream_decode_test.c b/contrib/libcbor/test/cbor_stream_decode_test.c index 5288016c2fc6..d1befd09b131 100644 --- a/contrib/libcbor/test/cbor_stream_decode_test.c +++ b/contrib/libcbor/test/cbor_stream_decode_test.c @@ -9,12 +9,12 @@ #include "cbor.h" #include "stream_expectations.h" -static void test_no_data(void **_CBOR_UNUSED(_state)) { +static void test_no_data(void** _state _CBOR_UNUSED) { assert_decoder_result_nedata(1, decode(NULL, 0)); } unsigned char embedded_uint8_data[] = {0x00, 0x01, 0x05, 0x17}; -static void test_uint8_embedded_decoding(void **_CBOR_UNUSED(_state)) { +static void test_uint8_embedded_decoding(void** _state _CBOR_UNUSED) { assert_uint8_eq(0); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(embedded_uint8_data, 1)); @@ -33,7 +33,7 @@ static void test_uint8_embedded_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char uint8_data[] = {0x18, 0x83, 0x18, 0xFF}; -static void test_uint8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_uint8_decoding(void** _state _CBOR_UNUSED) { assert_uint8_eq(0x83); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(uint8_data, 2)); @@ -44,7 +44,7 @@ static void test_uint8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char uint16_data[] = {0x19, 0x01, 0xf4}; -static void 
test_uint16_decoding(void **_CBOR_UNUSED(_state)) { +static void test_uint16_decoding(void** _state _CBOR_UNUSED) { assert_uint16_eq(500); assert_decoder_result(3, CBOR_DECODER_FINISHED, decode(uint16_data, 3)); @@ -52,7 +52,7 @@ static void test_uint16_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char uint32_data[] = {0x1a, 0xa5, 0xf7, 0x02, 0xb3}; -static void test_uint32_decoding(void **_CBOR_UNUSED(_state)) { +static void test_uint32_decoding(void** _state _CBOR_UNUSED) { assert_uint32_eq((uint32_t)2784428723UL); assert_decoder_result(5, CBOR_DECODER_FINISHED, decode(uint32_data, 5)); @@ -61,7 +61,7 @@ static void test_uint32_decoding(void **_CBOR_UNUSED(_state)) { unsigned char uint64_data[] = {0x1b, 0xa5, 0xf7, 0x02, 0xb3, 0xa5, 0xf7, 0x02, 0xb3}; -static void test_uint64_decoding(void **_CBOR_UNUSED(_state)) { +static void test_uint64_decoding(void** _state _CBOR_UNUSED) { assert_uint64_eq(11959030306112471731ULL); assert_decoder_result(9, CBOR_DECODER_FINISHED, decode(uint64_data, 9)); @@ -69,7 +69,7 @@ static void test_uint64_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char embedded_negint8_data[] = {0x20, 0x21, 0x25, 0x37}; -static void test_negint8_embedded_decoding(void **_CBOR_UNUSED(_state)) { +static void test_negint8_embedded_decoding(void** _state _CBOR_UNUSED) { assert_negint8_eq(0); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(embedded_negint8_data, 1)); @@ -88,7 +88,7 @@ static void test_negint8_embedded_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char negint8_data[] = {0x38, 0x83, 0x38, 0xFF}; -static void test_negint8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_negint8_decoding(void** _state _CBOR_UNUSED) { assert_negint8_eq(0x83); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(negint8_data, 2)); @@ -99,7 +99,7 @@ static void test_negint8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char negint16_data[] = {0x39, 0x01, 0xf4}; -static void test_negint16_decoding(void 
**_CBOR_UNUSED(_state)) { +static void test_negint16_decoding(void** _state _CBOR_UNUSED) { assert_negint16_eq(500); assert_decoder_result(3, CBOR_DECODER_FINISHED, decode(negint16_data, 3)); @@ -107,7 +107,7 @@ static void test_negint16_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char negint32_data[] = {0x3a, 0xa5, 0xf7, 0x02, 0xb3}; -static void test_negint32_decoding(void **_CBOR_UNUSED(_state)) { +static void test_negint32_decoding(void** _state _CBOR_UNUSED) { assert_negint32_eq((uint32_t)2784428723UL); assert_decoder_result(5, CBOR_DECODER_FINISHED, decode(negint32_data, 5)); @@ -116,7 +116,7 @@ static void test_negint32_decoding(void **_CBOR_UNUSED(_state)) { unsigned char negint64_data[] = {0x3b, 0xa5, 0xf7, 0x02, 0xb3, 0xa5, 0xf7, 0x02, 0xb3}; -static void test_negint64_decoding(void **_CBOR_UNUSED(_state)) { +static void test_negint64_decoding(void** _state _CBOR_UNUSED) { assert_negint64_eq(11959030306112471731ULL); assert_decoder_result(9, CBOR_DECODER_FINISHED, decode(negint64_data, 9)); @@ -124,7 +124,7 @@ static void test_negint64_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char bstring_embedded_int8_data[] = {0x41, 0xFF}; -static void test_bstring_embedded_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_bstring_embedded_int8_decoding(void** _state _CBOR_UNUSED) { assert_bstring_mem_eq(bstring_embedded_int8_data + 1, 1); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(bstring_embedded_int8_data, 2)); @@ -136,7 +136,7 @@ static void test_bstring_embedded_int8_decoding(void **_CBOR_UNUSED(_state)) { // the second byte of input); the data is never read, so we never run into // memory issues despite not allocating and initializing all the data. 
unsigned char bstring_int8_data[] = {0x58, 0x02 /*, [2 bytes] */}; -static void test_bstring_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_bstring_int8_decoding(void** _state _CBOR_UNUSED) { assert_bstring_mem_eq(bstring_int8_data + 2, 2); assert_decoder_result(4, CBOR_DECODER_FINISHED, decode(bstring_int8_data, 4)); @@ -146,7 +146,7 @@ static void test_bstring_int8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char bstring_int8_empty_data[] = {0x58, 0x00}; -static void test_bstring_int8_empty_decoding(void **_CBOR_UNUSED(_state)) { +static void test_bstring_int8_empty_decoding(void** _state _CBOR_UNUSED) { assert_bstring_mem_eq(bstring_int8_empty_data + 2, 0); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(bstring_int8_empty_data, 2)); @@ -155,7 +155,7 @@ static void test_bstring_int8_empty_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char bstring_int16_data[] = {0x59, 0x01, 0x5C /*, [348 bytes] */}; -static void test_bstring_int16_decoding(void **_CBOR_UNUSED(_state)) { +static void test_bstring_int16_decoding(void** _state _CBOR_UNUSED) { assert_bstring_mem_eq(bstring_int16_data + 3, 348); assert_decoder_result(3 + 348, CBOR_DECODER_FINISHED, decode(bstring_int16_data, 3 + 348)); @@ -167,7 +167,7 @@ static void test_bstring_int16_decoding(void **_CBOR_UNUSED(_state)) { unsigned char bstring_int32_data[] = {0x5A, 0x00, 0x10, 0x10, 0x10 /*, [1052688 bytes] */}; -static void test_bstring_int32_decoding(void **_CBOR_UNUSED(_state)) { +static void test_bstring_int32_decoding(void** _state _CBOR_UNUSED) { assert_bstring_mem_eq(bstring_int32_data + 5, 1052688); assert_decoder_result(5 + 1052688, CBOR_DECODER_FINISHED, decode(bstring_int32_data, 5 + 1052688)); @@ -181,7 +181,7 @@ static void test_bstring_int32_decoding(void **_CBOR_UNUSED(_state)) { unsigned char bstring_int64_data[] = { 0x5B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00 /*, [4294967296 bytes] */}; -static void test_bstring_int64_decoding(void 
**_CBOR_UNUSED(_state)) { +static void test_bstring_int64_decoding(void** _state _CBOR_UNUSED) { assert_bstring_mem_eq(bstring_int64_data + 9, 4294967296); assert_decoder_result(9 + 4294967296, CBOR_DECODER_FINISHED, decode(bstring_int64_data, 9 + 4294967296)); @@ -194,7 +194,7 @@ static void test_bstring_int64_decoding(void **_CBOR_UNUSED(_state)) { unsigned char bstring_indef_1_data[] = {0x5F, 0x40 /* Empty byte string */, 0xFF}; -static void test_bstring_indef_decoding_1(void **_CBOR_UNUSED(_state)) { +static void test_bstring_indef_decoding_1(void** _state _CBOR_UNUSED) { assert_bstring_indef_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(bstring_indef_1_data, 3)); @@ -209,7 +209,7 @@ static void test_bstring_indef_decoding_1(void **_CBOR_UNUSED(_state)) { } unsigned char bstring_indef_2_data[] = {0x5F, 0xFF}; -static void test_bstring_indef_decoding_2(void **_CBOR_UNUSED(_state)) { +static void test_bstring_indef_decoding_2(void** _state _CBOR_UNUSED) { assert_bstring_indef_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(bstring_indef_2_data, 2)); @@ -226,7 +226,7 @@ unsigned char bstring_indef_3_data[] = {0x5F, 0x58, 0x01, 0x00, // Break 0xFF}; -static void test_bstring_indef_decoding_3(void **_CBOR_UNUSED(_state)) { +static void test_bstring_indef_decoding_3(void** _state _CBOR_UNUSED) { assert_bstring_indef_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(bstring_indef_3_data, 6)); @@ -245,7 +245,7 @@ static void test_bstring_indef_decoding_3(void **_CBOR_UNUSED(_state)) { } unsigned char string_embedded_int8_data[] = {0x61, 0xFF}; -static void test_string_embedded_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_string_embedded_int8_decoding(void** _state _CBOR_UNUSED) { assert_string_mem_eq(string_embedded_int8_data + 1, 1); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(string_embedded_int8_data, 2)); @@ -257,7 +257,7 @@ static void test_string_embedded_int8_decoding(void 
**_CBOR_UNUSED(_state)) { // the second byte of input); the data is never read, so we never run into // memory issues despite not allocating and initializing all the data. unsigned char string_int8_data[] = {0x78, 0x02 /*, [2 bytes] */}; -static void test_string_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_string_int8_decoding(void** _state _CBOR_UNUSED) { assert_string_mem_eq(string_int8_data + 2, 2); assert_decoder_result(4, CBOR_DECODER_FINISHED, decode(string_int8_data, 4)); @@ -267,7 +267,7 @@ static void test_string_int8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char string_int8_empty_data[] = {0x78, 0x00}; -static void test_string_int8_empty_decoding(void **_CBOR_UNUSED(_state)) { +static void test_string_int8_empty_decoding(void** _state _CBOR_UNUSED) { assert_string_mem_eq(string_int8_empty_data + 2, 0); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(string_int8_empty_data, 2)); @@ -276,7 +276,7 @@ static void test_string_int8_empty_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char string_int16_data[] = {0x79, 0x01, 0x5C /*, [348 bytes] */}; -static void test_string_int16_decoding(void **_CBOR_UNUSED(_state)) { +static void test_string_int16_decoding(void** _state _CBOR_UNUSED) { assert_string_mem_eq(string_int16_data + 3, 348); assert_decoder_result(3 + 348, CBOR_DECODER_FINISHED, decode(string_int16_data, 3 + 348)); @@ -288,7 +288,7 @@ static void test_string_int16_decoding(void **_CBOR_UNUSED(_state)) { unsigned char string_int32_data[] = {0x7A, 0x00, 0x10, 0x10, 0x10 /*, [1052688 bytes] */}; -static void test_string_int32_decoding(void **_CBOR_UNUSED(_state)) { +static void test_string_int32_decoding(void** _state _CBOR_UNUSED) { assert_string_mem_eq(string_int32_data + 5, 1052688); assert_decoder_result(5 + 1052688, CBOR_DECODER_FINISHED, decode(string_int32_data, 5 + 1052688)); @@ -302,7 +302,7 @@ static void test_string_int32_decoding(void **_CBOR_UNUSED(_state)) { unsigned char string_int64_data[] = { 0x7B, 
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00 /*, [4294967296 bytes] */}; -static void test_string_int64_decoding(void **_CBOR_UNUSED(_state)) { +static void test_string_int64_decoding(void** _state _CBOR_UNUSED) { assert_string_mem_eq(string_int64_data + 9, 4294967296); assert_decoder_result(9 + 4294967296, CBOR_DECODER_FINISHED, decode(string_int64_data, 9 + 4294967296)); @@ -314,7 +314,7 @@ static void test_string_int64_decoding(void **_CBOR_UNUSED(_state)) { #endif unsigned char string_indef_1_data[] = {0x7F, 0x60 /* Empty string */, 0xFF}; -static void test_string_indef_decoding_1(void **_CBOR_UNUSED(_state)) { +static void test_string_indef_decoding_1(void** _state _CBOR_UNUSED) { assert_string_indef_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(string_indef_1_data, 3)); @@ -329,7 +329,7 @@ static void test_string_indef_decoding_1(void **_CBOR_UNUSED(_state)) { } unsigned char string_indef_2_data[] = {0x7F, 0xFF}; -static void test_string_indef_decoding_2(void **_CBOR_UNUSED(_state)) { +static void test_string_indef_decoding_2(void** _state _CBOR_UNUSED) { assert_string_indef_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(string_indef_2_data, 2)); @@ -346,7 +346,7 @@ unsigned char string_indef_3_data[] = {0x7F, 0x78, 0x01, 0x00, // Break 0xFF}; -static void test_string_indef_decoding_3(void **_CBOR_UNUSED(_state)) { +static void test_string_indef_decoding_3(void** _state _CBOR_UNUSED) { assert_string_indef_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(string_indef_3_data, 6)); @@ -365,14 +365,14 @@ static void test_string_indef_decoding_3(void **_CBOR_UNUSED(_state)) { } unsigned char array_embedded_int8_data[] = {0x80}; -static void test_array_embedded_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_array_embedded_int8_decoding(void** _state _CBOR_UNUSED) { assert_array_start(0); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(array_embedded_int8_data, 1)); } unsigned char 
array_int8_data[] = {0x98, 0x02, 0x00, 0x01}; -static void test_array_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_array_int8_decoding(void** _state _CBOR_UNUSED) { assert_array_start(2); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(array_int8_data, 4)); @@ -388,7 +388,7 @@ static void test_array_int8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char array_int16_data[] = {0x99, 0x00, 0x02, 0x00, 0x01}; -static void test_array_int16_decoding(void **_CBOR_UNUSED(_state)) { +static void test_array_int16_decoding(void** _state _CBOR_UNUSED) { assert_array_start(2); assert_decoder_result(3, CBOR_DECODER_FINISHED, decode(array_int16_data, 5)); @@ -404,7 +404,7 @@ static void test_array_int16_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char array_int32_data[] = {0x9A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01}; -static void test_array_int32_decoding(void **_CBOR_UNUSED(_state)) { +static void test_array_int32_decoding(void** _state _CBOR_UNUSED) { assert_array_start(2); assert_decoder_result(5, CBOR_DECODER_FINISHED, decode(array_int32_data, 7)); @@ -421,7 +421,7 @@ static void test_array_int32_decoding(void **_CBOR_UNUSED(_state)) { unsigned char array_int64_data[] = {0x9B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01}; -static void test_array_int64_decoding(void **_CBOR_UNUSED(_state)) { +static void test_array_int64_decoding(void** _state _CBOR_UNUSED) { assert_array_start(2); assert_decoder_result(9, CBOR_DECODER_FINISHED, decode(array_int64_data, 11)); @@ -437,7 +437,7 @@ static void test_array_int64_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char array_of_arrays_data[] = {0x82, 0x80, 0x80}; -static void test_array_of_arrays_decoding(void **_CBOR_UNUSED(_state)) { +static void test_array_of_arrays_decoding(void** _state _CBOR_UNUSED) { assert_array_start(2); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(array_of_arrays_data, 3)); @@ -452,7 +452,7 @@ static void test_array_of_arrays_decoding(void 
**_CBOR_UNUSED(_state)) { } unsigned char indef_array_data_1[] = {0x9F, 0x00, 0x18, 0xFF, 0x9F, 0xFF, 0xFF}; -static void test_indef_array_decoding_1(void **_CBOR_UNUSED(_state)) { +static void test_indef_array_decoding_1(void** _state _CBOR_UNUSED) { assert_indef_array_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(indef_array_data_1, 7)); @@ -479,7 +479,7 @@ static void test_indef_array_decoding_1(void **_CBOR_UNUSED(_state)) { } unsigned char map_embedded_int8_data[] = {0xa1, 0x01, 0x00}; -static void test_map_embedded_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_map_embedded_int8_decoding(void** _state _CBOR_UNUSED) { assert_map_start(1); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(map_embedded_int8_data, 3)); @@ -494,7 +494,7 @@ static void test_map_embedded_int8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char map_int8_data[] = {0xB8, 0x01, 0x00, 0x01}; -static void test_map_int8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_map_int8_decoding(void** _state _CBOR_UNUSED) { assert_map_start(1); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(map_int8_data, 4)); @@ -508,7 +508,7 @@ static void test_map_int8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char map_int16_data[] = {0xB9, 0x00, 0x01, 0x00, 0x01}; -static void test_map_int16_decoding(void **_CBOR_UNUSED(_state)) { +static void test_map_int16_decoding(void** _state _CBOR_UNUSED) { assert_map_start(1); assert_decoder_result(3, CBOR_DECODER_FINISHED, decode(map_int16_data, 5)); @@ -524,7 +524,7 @@ static void test_map_int16_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char map_int32_data[] = {0xBA, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01}; -static void test_map_int32_decoding(void **_CBOR_UNUSED(_state)) { +static void test_map_int32_decoding(void** _state _CBOR_UNUSED) { assert_map_start(1); assert_decoder_result(5, CBOR_DECODER_FINISHED, decode(map_int32_data, 7)); @@ -541,7 +541,7 @@ static void test_map_int32_decoding(void 
**_CBOR_UNUSED(_state)) { unsigned char map_int64_data[] = {0xBB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01}; -static void test_map_int64_decoding(void **_CBOR_UNUSED(_state)) { +static void test_map_int64_decoding(void** _state _CBOR_UNUSED) { assert_map_start(1); assert_decoder_result(9, CBOR_DECODER_FINISHED, decode(map_int64_data, 11)); @@ -557,7 +557,7 @@ static void test_map_int64_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char indef_map_data_1[] = {0xBF, 0x00, 0x18, 0xFF, 0xFF}; -static void test_indef_map_decoding_1(void **_CBOR_UNUSED(_state)) { +static void test_indef_map_decoding_1(void** _state _CBOR_UNUSED) { assert_indef_map_start(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(indef_map_data_1, 5)); @@ -575,13 +575,13 @@ static void test_indef_map_decoding_1(void **_CBOR_UNUSED(_state)) { } unsigned char embedded_tag_data[] = {0xC1}; -static void test_embedded_tag_decoding(void **_CBOR_UNUSED(_state)) { +static void test_embedded_tag_decoding(void** _state _CBOR_UNUSED) { assert_tag_eq(1); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(embedded_tag_data, 1)); } unsigned char int8_tag_data[] = {0xD8, 0xFE}; -static void test_int8_tag_decoding(void **_CBOR_UNUSED(_state)) { +static void test_int8_tag_decoding(void** _state _CBOR_UNUSED) { assert_tag_eq(254); assert_decoder_result(2, CBOR_DECODER_FINISHED, decode(int8_tag_data, 2)); @@ -589,7 +589,7 @@ static void test_int8_tag_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char int16_tag_data[] = {0xD9, 0xFE, 0xFD}; -static void test_int16_tag_decoding(void **_CBOR_UNUSED(_state)) { +static void test_int16_tag_decoding(void** _state _CBOR_UNUSED) { assert_tag_eq(65277); assert_decoder_result(3, CBOR_DECODER_FINISHED, decode(int16_tag_data, 3)); @@ -597,7 +597,7 @@ static void test_int16_tag_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char int32_tag_data[] = {0xDA, 0xFE, 0xFD, 0xFC, 0xFB}; -static void test_int32_tag_decoding(void 
**_CBOR_UNUSED(_state)) { +static void test_int32_tag_decoding(void** _state _CBOR_UNUSED) { assert_tag_eq(4278058235ULL); assert_decoder_result(5, CBOR_DECODER_FINISHED, decode(int32_tag_data, 5)); @@ -606,7 +606,7 @@ static void test_int32_tag_decoding(void **_CBOR_UNUSED(_state)) { unsigned char int64_tag_data[] = {0xDB, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8, 0xF7}; -static void test_int64_tag_decoding(void **_CBOR_UNUSED(_state)) { +static void test_int64_tag_decoding(void** _state _CBOR_UNUSED) { assert_tag_eq(18374120213919168759ULL); assert_decoder_result(9, CBOR_DECODER_FINISHED, decode(int64_tag_data, 9)); @@ -614,12 +614,12 @@ static void test_int64_tag_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char reserved_byte_data[] = {0xDC}; -static void test_reserved_byte_decoding(void **_CBOR_UNUSED(_state)) { +static void test_reserved_byte_decoding(void** _state _CBOR_UNUSED) { assert_decoder_result(0, CBOR_DECODER_ERROR, decode(reserved_byte_data, 1)); } unsigned char float2_data[] = {0xF9, 0x7B, 0xFF}; -static void test_float2_decoding(void **_CBOR_UNUSED(_state)) { +static void test_float2_decoding(void** _state _CBOR_UNUSED) { assert_half(65504.0f); assert_decoder_result(3, CBOR_DECODER_FINISHED, decode(float2_data, 3)); @@ -627,7 +627,7 @@ static void test_float2_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char float4_data[] = {0xFA, 0x47, 0xC3, 0x50, 0x00}; -static void test_float4_decoding(void **_CBOR_UNUSED(_state)) { +static void test_float4_decoding(void** _state _CBOR_UNUSED) { assert_float(100000.0f); assert_decoder_result(5, CBOR_DECODER_FINISHED, decode(float4_data, 5)); @@ -636,7 +636,7 @@ static void test_float4_decoding(void **_CBOR_UNUSED(_state)) { unsigned char float8_data[] = {0xFB, 0xC0, 0x10, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66}; -static void test_float8_decoding(void **_CBOR_UNUSED(_state)) { +static void test_float8_decoding(void** _state _CBOR_UNUSED) { assert_double(-4.1); assert_decoder_result(9, 
CBOR_DECODER_FINISHED, decode(float8_data, 9)); @@ -644,25 +644,25 @@ static void test_float8_decoding(void **_CBOR_UNUSED(_state)) { } unsigned char false_data[] = {0xF4}; -static void test_false_decoding(void **_CBOR_UNUSED(_state)) { +static void test_false_decoding(void** _state _CBOR_UNUSED) { assert_bool(false); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(false_data, 1)); } unsigned char true_data[] = {0xF5}; -static void test_true_decoding(void **_CBOR_UNUSED(_state)) { +static void test_true_decoding(void** _state _CBOR_UNUSED) { assert_bool(true); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(true_data, 1)); } unsigned char null_data[] = {0xF6}; -static void test_null_decoding(void **_CBOR_UNUSED(_state)) { +static void test_null_decoding(void** _state _CBOR_UNUSED) { assert_nil(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(null_data, 1)); } unsigned char undef_data[] = {0xF7}; -static void test_undef_decoding(void **_CBOR_UNUSED(_state)) { +static void test_undef_decoding(void** _state _CBOR_UNUSED) { assert_undef(); assert_decoder_result(1, CBOR_DECODER_FINISHED, decode(undef_data, 1)); } diff --git a/contrib/libcbor/test/copy_test.c b/contrib/libcbor/test/copy_test.c index 92e210a6a600..727791c2834d 100644 --- a/contrib/libcbor/test/copy_test.c +++ b/contrib/libcbor/test/copy_test.c @@ -11,7 +11,7 @@ cbor_item_t *item, *copy, *tmp; -static void test_uints(void **_CBOR_UNUSED(_state)) { +static void test_uints(void** _state _CBOR_UNUSED) { item = cbor_build_uint8(10); assert_uint8(copy = cbor_copy(item), 10); cbor_decref(&item); @@ -33,7 +33,7 @@ static void test_uints(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_negints(void **_CBOR_UNUSED(_state)) { +static void test_negints(void** _state _CBOR_UNUSED) { item = cbor_build_negint8(10); assert_true(cbor_get_uint8(copy = cbor_copy(item)) == 10); cbor_decref(&item); @@ -55,7 +55,7 @@ static void test_negints(void **_CBOR_UNUSED(_state)) { 
cbor_decref(©); } -static void test_def_bytestring(void **_CBOR_UNUSED(_state)) { +static void test_def_bytestring(void** _state _CBOR_UNUSED) { item = cbor_build_bytestring((cbor_data) "abc", 3); assert_memory_equal(cbor_bytestring_handle(copy = cbor_copy(item)), cbor_bytestring_handle(item), 3); @@ -63,7 +63,7 @@ static void test_def_bytestring(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_indef_bytestring(void **_CBOR_UNUSED(_state)) { +static void test_indef_bytestring(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_bytestring(); assert_true(cbor_bytestring_add_chunk( item, cbor_move(cbor_build_bytestring((cbor_data) "abc", 3)))); @@ -78,7 +78,7 @@ static void test_indef_bytestring(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_def_string(void **_CBOR_UNUSED(_state)) { +static void test_def_string(void** _state _CBOR_UNUSED) { item = cbor_build_string("abc"); assert_memory_equal(cbor_string_handle(copy = cbor_copy(item)), cbor_string_handle(item), 3); @@ -86,7 +86,7 @@ static void test_def_string(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_indef_string(void **_CBOR_UNUSED(_state)) { +static void test_indef_string(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_string(); assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("abc")))); copy = cbor_copy(item); @@ -100,7 +100,7 @@ static void test_indef_string(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_def_array(void **_CBOR_UNUSED(_state)) { +static void test_def_array(void** _state _CBOR_UNUSED) { item = cbor_new_definite_array(1); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); @@ -110,7 +110,7 @@ static void test_def_array(void **_CBOR_UNUSED(_state)) { cbor_decref(&tmp); } -static void test_indef_array(void **_CBOR_UNUSED(_state)) { +static void test_indef_array(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_array(); assert_true(cbor_array_push(item, 
cbor_move(cbor_build_uint8(42)))); @@ -120,7 +120,7 @@ static void test_indef_array(void **_CBOR_UNUSED(_state)) { cbor_decref(&tmp); } -static void test_def_map(void **_CBOR_UNUSED(_state)) { +static void test_def_map(void** _state _CBOR_UNUSED) { item = cbor_new_definite_map(1); assert_true(cbor_map_add(item, (struct cbor_pair){ .key = cbor_move(cbor_build_uint8(42)), @@ -133,7 +133,7 @@ static void test_def_map(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_indef_map(void **_CBOR_UNUSED(_state)) { +static void test_indef_map(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_map(); assert_true(cbor_map_add(item, (struct cbor_pair){ .key = cbor_move(cbor_build_uint8(42)), @@ -146,7 +146,7 @@ static void test_indef_map(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_tag(void **_CBOR_UNUSED(_state)) { +static void test_tag(void** _state _CBOR_UNUSED) { item = cbor_build_tag(10, cbor_move(cbor_build_uint8(42))); assert_uint8(cbor_move(cbor_tag_item(copy = cbor_copy(item))), 42); @@ -155,14 +155,14 @@ static void test_tag(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_ctrls(void **_CBOR_UNUSED(_state)) { +static void test_ctrls(void** _state _CBOR_UNUSED) { item = cbor_new_null(); assert_true(cbor_is_null(copy = cbor_copy(item))); cbor_decref(&item); cbor_decref(©); } -static void test_floats(void **_CBOR_UNUSED(_state)) { +static void test_floats(void** _state _CBOR_UNUSED) { item = cbor_build_float2(3.14f); assert_true(cbor_float_get_float2(copy = cbor_copy(item)) == cbor_float_get_float2(item)); @@ -182,7 +182,327 @@ static void test_floats(void **_CBOR_UNUSED(_state)) { cbor_decref(©); } -static void test_alloc_failure_simple(void **_CBOR_UNUSED(_state)) { +static void test_definite_uints(void** _state _CBOR_UNUSED) { + item = cbor_build_uint8(10); + assert_uint8(copy = cbor_copy_definite(item), 10); + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_negints(void** _state 
_CBOR_UNUSED) { + item = cbor_build_negint16(10); + assert_true(cbor_get_uint16(copy = cbor_copy_definite(item)) == 10); + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_bytestring(void** _state _CBOR_UNUSED) { + item = cbor_build_bytestring((cbor_data) "abc", 3); + assert_memory_equal(cbor_bytestring_handle(copy = cbor_copy_definite(item)), + cbor_bytestring_handle(item), 3); + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_indef_bytestring(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_bytestring(); + assert_true(cbor_bytestring_add_chunk( + item, cbor_move(cbor_build_bytestring((cbor_data) "abc", 3)))); + + assert_memory_equal(cbor_bytestring_handle(copy = cbor_copy_definite(item)), + "abc", 3); + assert_true(cbor_isa_bytestring(copy)); + assert_true(cbor_bytestring_is_definite(copy)); + assert_size_equal(cbor_bytestring_length(copy), 3); + + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_bytestring_alloc_failure(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_bytestring(); + assert_true(cbor_bytestring_add_chunk( + item, cbor_move(cbor_build_bytestring((cbor_data) "abc", 3)))); + + WITH_FAILING_MALLOC({ assert_null(cbor_copy_definite(item)); }); + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void test_definite_string(void** _state _CBOR_UNUSED) { + item = cbor_build_string("abc"); + assert_memory_equal(cbor_string_handle(copy = cbor_copy_definite(item)), + cbor_string_handle(item), 3); + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_indef_string(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_string(); + assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("abc")))); + + assert_memory_equal(cbor_string_handle(copy = cbor_copy_definite(item)), + "abc", 3); + assert_true(cbor_isa_string(copy)); + assert_true(cbor_string_is_definite(copy)); + assert_size_equal(cbor_string_length(copy), 3); 
+ + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_string_alloc_failure(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_string(); + assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("abc")))); + + WITH_FAILING_MALLOC({ assert_null(cbor_copy_definite(item)); }); + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void test_definite_array(void** _state _CBOR_UNUSED) { + item = cbor_new_definite_array(1); + assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); + + copy = cbor_copy_definite(item); + assert_true(cbor_isa_array(copy)); + assert_true(cbor_array_is_definite(copy)); + assert_size_equal(cbor_array_size(copy), 1); + assert_uint8(tmp = cbor_array_get(copy, 0), 42); + + cbor_decref(&item); + cbor_decref(©); + cbor_decref(&tmp); +} + +static void test_definite_indef_array(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_array(); + assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); + + copy = cbor_copy_definite(item); + assert_true(cbor_isa_array(copy)); + assert_true(cbor_array_is_definite(copy)); + assert_uint8(tmp = cbor_array_get(copy, 0), 42); + + cbor_decref(&item); + cbor_decref(©); + cbor_decref(&tmp); +} + +static void test_definite_indef_array_nested(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_array(); + cbor_item_t* nested_array = cbor_new_indefinite_array(); + assert_true(cbor_array_push(item, cbor_move(nested_array))); + + copy = cbor_copy_definite(item); + assert_true(cbor_isa_array(copy)); + assert_true(cbor_array_is_definite(copy)); + assert_size_equal(cbor_array_size(copy), 1); + + tmp = cbor_array_get(copy, 0); + assert_true(cbor_isa_array(tmp)); + assert_true(cbor_array_is_definite(tmp)); + assert_size_equal(cbor_array_size(tmp), 0); + + cbor_decref(&item); + cbor_decref(©); + cbor_decref(&tmp); +} + +static void test_definite_array_alloc_failure(void** _state _CBOR_UNUSED) { + item = 
cbor_new_indefinite_array(); + assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); + + WITH_FAILING_MALLOC({ assert_null(cbor_copy_definite(item)); }); + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void test_definite_array_item_alloc_failure(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_array(); + assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); + + WITH_MOCK_MALLOC({ assert_null(cbor_copy_definite(item)); }, 3, + // New array, new array data, item copy + MALLOC, MALLOC, MALLOC_FAIL); + + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void test_definite_map(void** _state _CBOR_UNUSED) { + item = cbor_new_definite_map(1); + assert_true(cbor_map_add(item, (struct cbor_pair){ + .key = cbor_move(cbor_build_uint8(42)), + .value = cbor_move(cbor_build_uint8(43)), + })); + + copy = cbor_copy_definite(item); + assert_true(cbor_isa_map(copy)); + assert_true(cbor_map_is_definite(copy)); + assert_size_equal(cbor_map_size(copy), 1); + assert_uint8(cbor_map_handle(copy)[0].key, 42); + + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_indef_map(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_map(); + assert_true(cbor_map_add(item, (struct cbor_pair){ + .key = cbor_move(cbor_build_uint8(42)), + .value = cbor_move(cbor_build_uint8(43)), + })); + + copy = cbor_copy_definite(item); + assert_true(cbor_isa_map(copy)); + assert_true(cbor_map_is_definite(copy)); + assert_size_equal(cbor_map_size(copy), 1); + assert_uint8(cbor_map_handle(copy)[0].key, 42); + + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_indef_map_nested(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_map(); + cbor_item_t* key = cbor_new_indefinite_array(); + cbor_item_t* value = cbor_new_indefinite_array(); + assert_true(cbor_map_add(item, (struct cbor_pair){ + .key = cbor_move(key), + .value = cbor_move(value), + })); + + copy = 
cbor_copy_definite(item); + assert_true(cbor_isa_map(copy)); + assert_true(cbor_map_is_definite(copy)); + assert_size_equal(cbor_map_size(copy), 1); + + assert_true(cbor_isa_array(cbor_map_handle(copy)[0].key)); + assert_true(cbor_array_is_definite(cbor_map_handle(copy)[0].key)); + assert_size_equal(cbor_array_size(cbor_map_handle(copy)[0].key), 0); + + assert_true(cbor_isa_array(cbor_map_handle(copy)[0].value)); + assert_true(cbor_array_is_definite(cbor_map_handle(copy)[0].value)); + assert_size_equal(cbor_array_size(cbor_map_handle(copy)[0].value), 0); + + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_map_alloc_failure(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_map(); + assert_true(cbor_map_add(item, (struct cbor_pair){ + .key = cbor_move(cbor_build_uint8(42)), + .value = cbor_move(cbor_build_uint8(43)), + })); + + WITH_FAILING_MALLOC({ assert_null(cbor_copy_definite(item)); }); + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void test_definite_map_key_alloc_failure(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_map(); + assert_true(cbor_map_add(item, (struct cbor_pair){ + .key = cbor_move(cbor_build_uint8(42)), + .value = cbor_move(cbor_build_uint8(43)), + })); + + WITH_MOCK_MALLOC({ assert_null(cbor_copy_definite(item)); }, 3, + // New map, map data, key copy + MALLOC, MALLOC, MALLOC_FAIL); + + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void test_definite_map_value_alloc_failure(void** _state _CBOR_UNUSED) { + item = cbor_new_indefinite_map(); + assert_true(cbor_map_add(item, (struct cbor_pair){ + .key = cbor_move(cbor_build_uint8(42)), + .value = cbor_move(cbor_build_uint8(43)), + })); + + WITH_MOCK_MALLOC({ assert_null(cbor_copy_definite(item)); }, 4, + // New map, map data, key copy, value copy + MALLOC, MALLOC, MALLOC, MALLOC_FAIL); + + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void 
test_definite_tag(void** _state _CBOR_UNUSED) { + item = cbor_build_tag(10, cbor_move(cbor_build_uint8(42))); + + copy = cbor_copy_definite(item); + assert_uint8(tmp = cbor_tag_item(copy), 42); + + cbor_decref(&item); + cbor_decref(©); + cbor_decref(&tmp); +} + +static void test_definite_tag_nested(void** _state _CBOR_UNUSED) { + item = cbor_build_tag(10, cbor_move(cbor_new_indefinite_array())); + + copy = cbor_copy_definite(item); + assert_true(cbor_isa_tag(copy)); + + tmp = cbor_tag_item(copy); + assert_true(cbor_isa_array(tmp)); + assert_true(cbor_array_is_definite(tmp)); + assert_size_equal(cbor_array_size(tmp), 0); + + cbor_decref(&item); + cbor_decref(©); + cbor_decref(&tmp); +} + +static void test_definite_tag_alloc_failure(void** _state _CBOR_UNUSED) { + item = cbor_build_tag(10, cbor_move(cbor_build_uint8(42))); + + WITH_FAILING_MALLOC({ assert_null(cbor_copy_definite(item)); }); + assert_size_equal(cbor_refcount(item), 1); + + cbor_decref(&item); +} + +static void test_definite_ctrls(void** _state _CBOR_UNUSED) { + item = cbor_new_null(); + assert_true(cbor_is_null(copy = cbor_copy_definite(item))); + cbor_decref(&item); + cbor_decref(©); +} + +static void test_definite_floats(void** _state _CBOR_UNUSED) { + item = cbor_build_float2(3.14f); + assert_true(cbor_float_get_float2(copy = cbor_copy_definite(item)) == + cbor_float_get_float2(item)); + cbor_decref(&item); + cbor_decref(©); + + item = cbor_build_float4(3.14f); + assert_true(cbor_float_get_float4(copy = cbor_copy_definite(item)) == + cbor_float_get_float4(item)); + cbor_decref(&item); + cbor_decref(©); + + item = cbor_build_float8(3.14); + assert_true(cbor_float_get_float8(copy = cbor_copy_definite(item)) == + cbor_float_get_float8(item)); + cbor_decref(&item); + cbor_decref(©); +} + +static void test_alloc_failure_simple(void** _state _CBOR_UNUSED) { item = cbor_build_uint8(10); WITH_FAILING_MALLOC({ assert_null(cbor_copy(item)); }); @@ -191,7 +511,7 @@ static void test_alloc_failure_simple(void 
**_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_bytestring_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_bytestring_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_bytestring(); assert_true(cbor_bytestring_add_chunk( item, cbor_move(cbor_build_bytestring((cbor_data) "abc", 3)))); @@ -202,7 +522,7 @@ static void test_bytestring_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_bytestring_chunk_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_bytestring_chunk_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_bytestring(); assert_true(cbor_bytestring_add_chunk( item, cbor_move(cbor_build_bytestring((cbor_data) "abc", 3)))); @@ -213,7 +533,7 @@ static void test_bytestring_chunk_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_bytestring_chunk_append_failure(void **_CBOR_UNUSED(_state)) { +static void test_bytestring_chunk_append_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_bytestring(); assert_true(cbor_bytestring_add_chunk( item, cbor_move(cbor_build_bytestring((cbor_data) "abc", 3)))); @@ -228,7 +548,7 @@ static void test_bytestring_chunk_append_failure(void **_CBOR_UNUSED(_state)) { } static void test_bytestring_second_chunk_alloc_failure( - void **_CBOR_UNUSED(_state)) { + void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_bytestring(); assert_true(cbor_bytestring_add_chunk( item, cbor_move(cbor_build_bytestring((cbor_data) "abc", 3)))); @@ -245,7 +565,7 @@ static void test_bytestring_second_chunk_alloc_failure( cbor_decref(&item); } -static void test_string_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_string_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_string(); assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("abc")))); @@ -255,7 +575,7 @@ static void test_string_alloc_failure(void **_CBOR_UNUSED(_state)) { 
cbor_decref(&item); } -static void test_string_chunk_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_string_chunk_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_string(); assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("abc")))); @@ -265,7 +585,7 @@ static void test_string_chunk_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_string_chunk_append_failure(void **_CBOR_UNUSED(_state)) { +static void test_string_chunk_append_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_string(); assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("abc")))); @@ -278,8 +598,7 @@ static void test_string_chunk_append_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_string_second_chunk_alloc_failure( - void **_CBOR_UNUSED(_state)) { +static void test_string_second_chunk_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_string(); assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("abc")))); assert_true(cbor_string_add_chunk(item, cbor_move(cbor_build_string("def")))); @@ -294,7 +613,7 @@ static void test_string_second_chunk_alloc_failure( cbor_decref(&item); } -static void test_array_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_array_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_array(); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); @@ -304,7 +623,7 @@ static void test_array_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_array_item_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_array_item_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_array(); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); @@ -317,7 +636,7 @@ static void test_array_item_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void 
test_array_push_failure(void **_CBOR_UNUSED(_state)) { +static void test_array_push_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_array(); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); @@ -330,7 +649,7 @@ static void test_array_push_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_array_second_item_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_array_second_item_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_array(); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(42)))); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(43)))); @@ -344,7 +663,7 @@ static void test_array_second_item_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_map_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_map_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_map(); assert_true( cbor_map_add(item, (struct cbor_pair){cbor_move(cbor_build_uint8(42)), @@ -356,7 +675,7 @@ static void test_map_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_map_key_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_map_key_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_map(); assert_true( cbor_map_add(item, (struct cbor_pair){cbor_move(cbor_build_uint8(42)), @@ -370,7 +689,7 @@ static void test_map_key_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_map_value_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_map_value_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_map(); assert_true( cbor_map_add(item, (struct cbor_pair){cbor_move(cbor_build_uint8(42)), @@ -384,7 +703,7 @@ static void test_map_value_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_map_add_failure(void **_CBOR_UNUSED(_state)) { +static void 
test_map_add_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_map(); assert_true( cbor_map_add(item, (struct cbor_pair){cbor_move(cbor_build_uint8(42)), @@ -398,7 +717,7 @@ static void test_map_add_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_map_second_key_failure(void **_CBOR_UNUSED(_state)) { +static void test_map_second_key_failure(void** _state _CBOR_UNUSED) { item = cbor_new_indefinite_map(); assert_true( cbor_map_add(item, (struct cbor_pair){cbor_move(cbor_build_uint8(42)), @@ -415,7 +734,7 @@ static void test_map_second_key_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_tag_item_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_tag_item_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_build_tag(1, cbor_move(cbor_build_uint8(42))); WITH_FAILING_MALLOC({ assert_null(cbor_copy(item)); }); @@ -424,7 +743,7 @@ static void test_tag_item_alloc_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_tag_alloc_failure(void **_CBOR_UNUSED(_state)) { +static void test_tag_alloc_failure(void** _state _CBOR_UNUSED) { item = cbor_build_tag(1, cbor_move(cbor_build_uint8(42))); WITH_MOCK_MALLOC({ assert_null(cbor_copy(item)); }, 2, @@ -470,6 +789,30 @@ int main(void) { cmocka_unit_test(test_map_second_key_failure), cmocka_unit_test(test_tag_item_alloc_failure), cmocka_unit_test(test_tag_alloc_failure), + cmocka_unit_test(test_definite_uints), + cmocka_unit_test(test_definite_negints), + cmocka_unit_test(test_definite_bytestring), + cmocka_unit_test(test_definite_bytestring_alloc_failure), + cmocka_unit_test(test_definite_indef_bytestring), + cmocka_unit_test(test_definite_string), + cmocka_unit_test(test_definite_indef_string), + cmocka_unit_test(test_definite_string_alloc_failure), + cmocka_unit_test(test_definite_array), + cmocka_unit_test(test_definite_indef_array), + cmocka_unit_test(test_definite_indef_array_nested), + 
cmocka_unit_test(test_definite_array_alloc_failure), + cmocka_unit_test(test_definite_array_item_alloc_failure), + cmocka_unit_test(test_definite_map), + cmocka_unit_test(test_definite_indef_map), + cmocka_unit_test(test_definite_indef_map_nested), + cmocka_unit_test(test_definite_map_alloc_failure), + cmocka_unit_test(test_definite_map_key_alloc_failure), + cmocka_unit_test(test_definite_map_value_alloc_failure), + cmocka_unit_test(test_definite_tag), + cmocka_unit_test(test_definite_tag_nested), + cmocka_unit_test(test_definite_tag_alloc_failure), + cmocka_unit_test(test_definite_ctrls), + cmocka_unit_test(test_definite_floats), }; return cmocka_run_group_tests(tests, NULL, NULL); } diff --git a/contrib/libcbor/test/float_ctrl_encoders_test.c b/contrib/libcbor/test/float_ctrl_encoders_test.c index 8940106d91d3..f7d302472666 100644 --- a/contrib/libcbor/test/float_ctrl_encoders_test.c +++ b/contrib/libcbor/test/float_ctrl_encoders_test.c @@ -11,24 +11,24 @@ unsigned char buffer[512]; -static void test_bools(void **_CBOR_UNUSED(_state)) { +static void test_bools(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_bool(false, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF4}), 1); assert_size_equal(1, cbor_encode_bool(true, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF5}), 1); } -static void test_null(void **_CBOR_UNUSED(_state)) { +static void test_null(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_null(buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF6}), 1); } -static void test_undef(void **_CBOR_UNUSED(_state)) { +static void test_undef(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_undef(buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF7}), 1); } -static void test_break(void **_CBOR_UNUSED(_state)) { +static void test_break(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_break(buffer, 512)); assert_memory_equal(buffer, ((unsigned 
char[]){0xFF}), 1); } @@ -39,7 +39,7 @@ static void assert_half_float_codec_identity(void) { unsigned char secondary_buffer[3]; struct cbor_load_result res; // Load and check data in buffer - cbor_item_t *half_float = cbor_load(buffer, 3, &res); + cbor_item_t* half_float = cbor_load(buffer, 3, &res); assert_size_equal(res.error.code, CBOR_ERR_NONE); assert_true(cbor_isa_float_ctrl(half_float)); assert_true(cbor_is_float(half_float)); @@ -51,7 +51,7 @@ static void assert_half_float_codec_identity(void) { cbor_decref(&half_float); } -static void test_half(void **_CBOR_UNUSED(_state)) { +static void test_half(void** _state _CBOR_UNUSED) { assert_size_equal(3, cbor_encode_half(1.5f, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF9, 0x3E, 0x00}), 3); assert_half_float_codec_identity(); @@ -117,21 +117,17 @@ static void test_half(void **_CBOR_UNUSED(_state)) { assert_half_float_codec_identity(); } -static void test_half_special(void **_CBOR_UNUSED(_state)) { +static void test_half_special(void** _state _CBOR_UNUSED) { assert_size_equal(3, cbor_encode_half(NAN, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF9, 0x7E, 0x00}), 3); assert_half_float_codec_identity(); - // We discard all information bits in half-float NaNs. This is - // not required for the core CBOR protocol (it is only a suggestion in - // Section 3.9). 
- // See https://github.com/PJK/libcbor/issues/215 assert_size_equal(3, cbor_encode_half(nanf("2"), buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF9, 0x7E, 0x00}), 3); assert_half_float_codec_identity(); } -static void test_half_infinity(void **_CBOR_UNUSED(_state)) { +static void test_half_infinity(void** _state _CBOR_UNUSED) { assert_size_equal(3, cbor_encode_half(INFINITY, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xF9, 0x7C, 0x00}), 3); assert_half_float_codec_identity(); @@ -141,7 +137,7 @@ static void test_half_infinity(void **_CBOR_UNUSED(_state)) { assert_half_float_codec_identity(); } -static void test_float(void **_CBOR_UNUSED(_state)) { +static void test_float(void** _state _CBOR_UNUSED) { assert_size_equal(5, cbor_encode_single(3.4028234663852886e+38, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xFA, 0x7F, 0x7F, 0xFF, 0xFF}), 5); @@ -150,12 +146,9 @@ static void test_float(void **_CBOR_UNUSED(_state)) { assert_memory_equal(buffer, ((unsigned char[]){0xFA, 0x7F, 0xC0, 0x00, 0x00}), 5); -#ifndef _WIN32 - // TODO: https://github.com/PJK/libcbor/issues/271 assert_size_equal(5, cbor_encode_single(nanf("3"), buffer, 512)); - assert_memory_equal(buffer, ((unsigned char[]){0xFA, 0x7F, 0xC0, 0x00, 0x03}), + assert_memory_equal(buffer, ((unsigned char[]){0xFA, 0x7F, 0xC0, 0x00, 0x00}), 5); -#endif assert_size_equal(5, cbor_encode_single(strtof("Inf", NULL), buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xFA, 0x7F, 0x80, 0x00, 0x00}), @@ -166,7 +159,7 @@ static void test_float(void **_CBOR_UNUSED(_state)) { 5); } -static void test_double(void **_CBOR_UNUSED(_state)) { +static void test_double(void** _state _CBOR_UNUSED) { assert_size_equal(9, cbor_encode_double(1.0e+300, buffer, 512)); assert_memory_equal( buffer, @@ -179,14 +172,11 @@ static void test_double(void **_CBOR_UNUSED(_state)) { ((unsigned char[]){0xFB, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), 9); -#ifndef _WIN32 - // TODO: 
https://github.com/PJK/libcbor/issues/271 assert_size_equal(9, cbor_encode_double(nan("3"), buffer, 512)); assert_memory_equal( buffer, - ((unsigned char[]){0xFB, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + ((unsigned char[]){0xFB, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), 9); -#endif assert_size_equal(9, cbor_encode_double(strtod("Inf", NULL), buffer, 512)); assert_memory_equal( diff --git a/contrib/libcbor/test/float_ctrl_test.c b/contrib/libcbor/test/float_ctrl_test.c index c939486877e7..5a19d58b68e1 100644 --- a/contrib/libcbor/test/float_ctrl_test.c +++ b/contrib/libcbor/test/float_ctrl_test.c @@ -17,14 +17,14 @@ #include "cbor.h" #include "test_allocator.h" -cbor_item_t *float_ctrl; +cbor_item_t* float_ctrl; struct cbor_load_result res; static const float eps = 0.00001f; unsigned char float2_data[] = {0xF9, 0x7B, 0xFF}; -static void test_float2(void **_CBOR_UNUSED(_state)) { +static void test_float2(void** _state _CBOR_UNUSED) { float_ctrl = cbor_load(float2_data, 3, &res); assert_true(cbor_isa_float_ctrl(float_ctrl)); assert_true(cbor_is_float(float_ctrl)); @@ -37,7 +37,7 @@ static void test_float2(void **_CBOR_UNUSED(_state)) { unsigned char float4_data[] = {0xFA, 0x47, 0xC3, 0x50, 0x00}; -static void test_float4(void **_CBOR_UNUSED(_state)) { +static void test_float4(void** _state _CBOR_UNUSED) { float_ctrl = cbor_load(float4_data, 5, &res); assert_true(cbor_isa_float_ctrl(float_ctrl)); assert_true(cbor_is_float(float_ctrl)); @@ -51,7 +51,7 @@ static void test_float4(void **_CBOR_UNUSED(_state)) { unsigned char float8_data[] = {0xFB, 0x7E, 0x37, 0xE4, 0x3C, 0x88, 0x00, 0x75, 0x9C}; -static void test_float8(void **_CBOR_UNUSED(_state)) { +static void test_float8(void** _state _CBOR_UNUSED) { float_ctrl = cbor_load(float8_data, 9, &res); assert_true(cbor_isa_float_ctrl(float_ctrl)); assert_true(cbor_is_float(float_ctrl)); @@ -66,9 +66,11 @@ static void test_float8(void **_CBOR_UNUSED(_state)) { unsigned char null_data[] = {0xF6}; -static void 
test_null(void **_CBOR_UNUSED(_state)) { +static void test_null(void** _state _CBOR_UNUSED) { float_ctrl = cbor_load(null_data, 1, &res); assert_true(cbor_isa_float_ctrl(float_ctrl)); + assert_true(cbor_float_ctrl_is_ctrl(float_ctrl)); + assert_true(cbor_float_get_width(float_ctrl) == CBOR_FLOAT_0); assert_true(cbor_is_null(float_ctrl)); cbor_decref(&float_ctrl); assert_null(float_ctrl); @@ -76,9 +78,11 @@ static void test_null(void **_CBOR_UNUSED(_state)) { unsigned char undef_data[] = {0xF7}; -static void test_undef(void **_CBOR_UNUSED(_state)) { +static void test_undef(void** _state _CBOR_UNUSED) { float_ctrl = cbor_load(undef_data, 1, &res); assert_true(cbor_isa_float_ctrl(float_ctrl)); + assert_true(cbor_float_ctrl_is_ctrl(float_ctrl)); + assert_true(cbor_float_get_width(float_ctrl) == CBOR_FLOAT_0); assert_true(cbor_is_undef(float_ctrl)); cbor_decref(&float_ctrl); assert_null(float_ctrl); @@ -86,10 +90,12 @@ static void test_undef(void **_CBOR_UNUSED(_state)) { unsigned char bool_data[] = {0xF4, 0xF5}; -static void test_bool(void **_CBOR_UNUSED(_state)) { +static void test_bool(void** _state _CBOR_UNUSED) { _CBOR_TEST_DISABLE_ASSERT({ float_ctrl = cbor_load(bool_data, 1, &res); assert_true(cbor_isa_float_ctrl(float_ctrl)); + assert_true(cbor_float_ctrl_is_ctrl(float_ctrl)); + assert_true(cbor_float_get_width(float_ctrl) == CBOR_FLOAT_0); assert_true(cbor_is_bool(float_ctrl)); assert_false(cbor_get_bool(float_ctrl)); cbor_set_bool(float_ctrl, true); @@ -100,6 +106,8 @@ static void test_bool(void **_CBOR_UNUSED(_state)) { float_ctrl = cbor_load(bool_data + 1, 1, &res); assert_true(cbor_isa_float_ctrl(float_ctrl)); + assert_true(cbor_float_ctrl_is_ctrl(float_ctrl)); + assert_true(cbor_float_get_width(float_ctrl) == CBOR_FLOAT_0); assert_true(cbor_is_bool(float_ctrl)); assert_true(cbor_get_bool(float_ctrl)); cbor_set_bool(float_ctrl, false); @@ -110,7 +118,7 @@ static void test_bool(void **_CBOR_UNUSED(_state)) { }); } -static void test_float_ctrl_creation(void 
**_CBOR_UNUSED(_state)) { +static void test_float_ctrl_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_ctrl()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_float2()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_float4()); }); @@ -119,12 +127,24 @@ static void test_float_ctrl_creation(void **_CBOR_UNUSED(_state)) { WITH_FAILING_MALLOC({ assert_null(cbor_new_undef()); }); WITH_FAILING_MALLOC({ assert_null(cbor_build_bool(false)); }); - WITH_FAILING_MALLOC({ assert_null(cbor_build_float2(3.14)); }); - WITH_FAILING_MALLOC({ assert_null(cbor_build_float4(3.14)); }); + WITH_FAILING_MALLOC({ assert_null(cbor_build_float2(3.14f)); }); + WITH_FAILING_MALLOC({ assert_null(cbor_build_float4(3.14f)); }); WITH_FAILING_MALLOC({ assert_null(cbor_build_float8(3.14)); }); WITH_FAILING_MALLOC({ assert_null(cbor_build_ctrl(0xAF)); }); } +static void test_ctrl_on_float(void** _state _CBOR_UNUSED) { + float_ctrl = cbor_build_float4(3.14f); + assert_non_null(float_ctrl); + assert_true(cbor_is_float(float_ctrl)); + assert_false(cbor_float_ctrl_is_ctrl(float_ctrl)); + assert_false(cbor_is_null(float_ctrl)); + assert_false(cbor_is_undef(float_ctrl)); + assert_false(cbor_is_bool(float_ctrl)); + cbor_decref(&float_ctrl); + assert_null(float_ctrl); +} + int main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test(test_float2), @@ -134,6 +154,7 @@ int main(void) { cmocka_unit_test(test_undef), cmocka_unit_test(test_bool), cmocka_unit_test(test_float_ctrl_creation), + cmocka_unit_test(test_ctrl_on_float), }; return cmocka_run_group_tests(tests, NULL, NULL); } diff --git a/contrib/libcbor/test/fuzz_test.c b/contrib/libcbor/test/fuzz_test.c index a02ed7ea9287..5effadd64774 100644 --- a/contrib/libcbor/test/fuzz_test.c +++ b/contrib/libcbor/test/fuzz_test.c @@ -18,7 +18,7 @@ #endif #ifdef PRINT_FUZZ -static void printmem(const unsigned char *ptr, size_t length) { +static void printmem(const unsigned char* ptr, size_t length) { for (size_t i = 0; i < 
length; i++) printf("%02X", ptr[i]); printf("\n"); } @@ -26,7 +26,7 @@ static void printmem(const unsigned char *ptr, size_t length) { unsigned seed; -void *mock_malloc(size_t size) { +void* mock_malloc(size_t size) { if (size > (1 << 19)) return NULL; else @@ -34,11 +34,11 @@ void *mock_malloc(size_t size) { } static void run_round(void) { - cbor_item_t *item; + cbor_item_t* item; struct cbor_load_result res; size_t length = rand() % MAXLEN + 1; - unsigned char *data = malloc(length); + unsigned char* data = malloc(length); for (size_t i = 0; i < length; i++) { data[i] = rand() % 0xFF; } @@ -55,7 +55,7 @@ static void run_round(void) { free(data); } -static void fuzz(void **_CBOR_UNUSED(_state)) { +static void fuzz(void** _state _CBOR_UNUSED) { cbor_set_allocs(mock_malloc, realloc, free); printf("Fuzzing %llu rounds of up to %llu bytes with seed %u\n", ROUNDS, MAXLEN, seed); @@ -67,7 +67,7 @@ static void fuzz(void **_CBOR_UNUSED(_state)) { (ROUNDS * MAXLEN) / 1024); } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc > 1) seed = (unsigned)strtoul(argv[1], NULL, 10); else diff --git a/contrib/libcbor/test/map_encoders_test.c b/contrib/libcbor/test/map_encoders_test.c index bbb5fdc1ad91..40cfe33631ed 100644 --- a/contrib/libcbor/test/map_encoders_test.c +++ b/contrib/libcbor/test/map_encoders_test.c @@ -10,18 +10,18 @@ unsigned char buffer[512]; -static void test_embedded_map_start(void **_CBOR_UNUSED(_state)) { +static void test_embedded_map_start(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_map_start(1, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xA1}), 1); } -static void test_map_start(void **_CBOR_UNUSED(_state)) { +static void test_map_start(void** _state _CBOR_UNUSED) { assert_size_equal(5, cbor_encode_map_start(1000000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xBA, 0x00, 0x0F, 0x42, 0x40}), 5); } -static void test_indef_map_start(void **_CBOR_UNUSED(_state)) { +static 
void test_indef_map_start(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_indef_map_start(buffer, 512)); assert_size_equal(0, cbor_encode_indef_map_start(buffer, 0)); assert_memory_equal(buffer, ((unsigned char[]){0xBF}), 1); diff --git a/contrib/libcbor/test/map_test.c b/contrib/libcbor/test/map_test.c index 11bd7a8d7242..1e8492ef49ea 100644 --- a/contrib/libcbor/test/map_test.c +++ b/contrib/libcbor/test/map_test.c @@ -17,12 +17,12 @@ #include "cbor.h" #include "test_allocator.h" -cbor_item_t *map; +cbor_item_t* map; struct cbor_load_result res; unsigned char empty_map[] = {0xA0}; -static void test_empty_map(void **_CBOR_UNUSED(_state)) { +static void test_empty_map(void** _state _CBOR_UNUSED) { map = cbor_load(empty_map, 1, &res); assert_non_null(map); assert_true(cbor_typeof(map) == CBOR_TYPE_MAP); @@ -37,7 +37,7 @@ static void test_empty_map(void **_CBOR_UNUSED(_state)) { unsigned char simple_map[] = {0xA2, 0x01, 0x02, 0x03, 0x04}; /* {1: 2, 3: 4} */ -static void test_simple_map(void **_CBOR_UNUSED(_state)) { +static void test_simple_map(void** _state _CBOR_UNUSED) { map = cbor_load(simple_map, 5, &res); assert_non_null(map); assert_true(cbor_typeof(map) == CBOR_TYPE_MAP); @@ -45,7 +45,7 @@ static void test_simple_map(void **_CBOR_UNUSED(_state)) { assert_true(cbor_map_is_definite(map)); assert_true(cbor_map_size(map) == 2); assert_true(res.read == 5); - struct cbor_pair *handle = cbor_map_handle(map); + struct cbor_pair* handle = cbor_map_handle(map); assert_uint8(handle[0].key, 1); assert_uint8(handle[0].value, 2); assert_uint8(handle[1].key, 3); @@ -57,7 +57,7 @@ static void test_simple_map(void **_CBOR_UNUSED(_state)) { unsigned char simple_indef_map[] = {0xBF, 0x01, 0x02, 0x03, 0x04, 0xFF}; /* {_ 1: 2, 3: 4} */ -static void test_indef_simple_map(void **_CBOR_UNUSED(_state)) { +static void test_indef_simple_map(void** _state _CBOR_UNUSED) { map = cbor_load(simple_indef_map, 6, &res); assert_non_null(map); assert_true(cbor_typeof(map) == 
CBOR_TYPE_MAP); @@ -65,7 +65,7 @@ static void test_indef_simple_map(void **_CBOR_UNUSED(_state)) { assert_true(cbor_map_is_indefinite(map)); assert_true(cbor_map_size(map) == 2); assert_true(res.read == 6); - struct cbor_pair *handle = cbor_map_handle(map); + struct cbor_pair* handle = cbor_map_handle(map); assert_uint8(handle[0].key, 1); assert_uint8(handle[0].value, 2); assert_uint8(handle[1].key, 3); @@ -84,7 +84,7 @@ unsigned char def_nested_map[] = { 0x74, 0x69, 0x74, 0x6C, 0x65, 0x70, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x20, 0x67, 0x6C, 0x6F, 0x73, 0x73, 0x61, 0x72, 0x79}; -static void test_def_nested_map(void **_CBOR_UNUSED(_state)) { +static void test_def_nested_map(void** _state _CBOR_UNUSED) { map = cbor_load(def_nested_map, 34, &res); assert_non_null(map); assert_true(cbor_typeof(map) == CBOR_TYPE_MAP); @@ -92,10 +92,10 @@ static void test_def_nested_map(void **_CBOR_UNUSED(_state)) { assert_true(cbor_map_is_definite(map)); assert_true(cbor_map_size(map) == 1); assert_true(res.read == 34); - struct cbor_pair *handle = cbor_map_handle(map); + struct cbor_pair* handle = cbor_map_handle(map); assert_true(cbor_typeof(handle[0].key) == CBOR_TYPE_STRING); assert_true(cbor_typeof(handle[0].value) == CBOR_TYPE_MAP); - struct cbor_pair *inner_handle = cbor_map_handle(handle[0].value); + struct cbor_pair* inner_handle = cbor_map_handle(handle[0].value); assert_true(cbor_typeof(inner_handle[0].key) == CBOR_TYPE_STRING); assert_true(cbor_typeof(inner_handle[0].value) == CBOR_TYPE_STRING); assert_memory_equal(cbor_string_handle(inner_handle[0].value), @@ -108,7 +108,7 @@ unsigned char streamed_key_map[] = {0xA1, 0x7F, 0x61, 0x61, 0x61, 0x62, 0xFF, 0xA0}; /* '{ (_"a" "b"): {}}' */ -static void test_streamed_key_map(void **_CBOR_UNUSED(_state)) { +static void test_streamed_key_map(void** _state _CBOR_UNUSED) { map = cbor_load(streamed_key_map, 8, &res); assert_non_null(map); assert_true(cbor_typeof(map) == CBOR_TYPE_MAP); @@ -116,7 +116,7 @@ static void 
test_streamed_key_map(void **_CBOR_UNUSED(_state)) { assert_true(cbor_map_is_definite(map)); assert_true(cbor_map_size(map) == 1); assert_true(res.read == 8); - struct cbor_pair *handle = cbor_map_handle(map); + struct cbor_pair* handle = cbor_map_handle(map); assert_true(cbor_typeof(handle[0].key) == CBOR_TYPE_STRING); assert_true(cbor_string_is_indefinite(handle[0].key)); assert_size_equal(cbor_string_chunk_count(handle[0].key), 2); @@ -130,7 +130,7 @@ unsigned char streamed_kv_map[] = {0xA1, 0x7F, 0x61, 0x61, 0x61, 0x62, 0xFF, 0x7F, 0x61, 0x63, 0x61, 0x64, 0xFF}; /* '{ (_"a" "b"): (_"c", "d")}' */ -static void test_streamed_kv_map(void **_CBOR_UNUSED(_state)) { +static void test_streamed_kv_map(void** _state _CBOR_UNUSED) { map = cbor_load(streamed_kv_map, 13, &res); assert_non_null(map); assert_true(cbor_typeof(map) == CBOR_TYPE_MAP); @@ -138,7 +138,7 @@ static void test_streamed_kv_map(void **_CBOR_UNUSED(_state)) { assert_true(cbor_map_is_definite(map)); assert_size_equal(cbor_map_size(map), 1); assert_size_equal(res.read, 13); - struct cbor_pair *handle = cbor_map_handle(map); + struct cbor_pair* handle = cbor_map_handle(map); assert_true(cbor_typeof(handle[0].key) == CBOR_TYPE_STRING); assert_true(cbor_string_is_indefinite(handle[0].key)); assert_size_equal(cbor_string_chunk_count(handle[0].key), 2); @@ -157,7 +157,7 @@ unsigned char streamed_streamed_kv_map[] = {0xBF, 0x7F, 0x61, 0x61, 0x61, 0x61, 0x64, 0xFF, 0xFF}; /* '{_ (_"a" "b"): (_"c", "d")}' */ -static void test_streamed_streamed_kv_map(void **_CBOR_UNUSED(_state)) { +static void test_streamed_streamed_kv_map(void** _state _CBOR_UNUSED) { map = cbor_load(streamed_streamed_kv_map, 14, &res); assert_non_null(map); assert_true(cbor_typeof(map) == CBOR_TYPE_MAP); @@ -165,7 +165,7 @@ static void test_streamed_streamed_kv_map(void **_CBOR_UNUSED(_state)) { assert_true(cbor_map_is_indefinite(map)); assert_size_equal(cbor_map_size(map), 1); assert_size_equal(res.read, 14); - struct cbor_pair *handle = 
cbor_map_handle(map); + struct cbor_pair* handle = cbor_map_handle(map); assert_true(cbor_typeof(handle[0].key) == CBOR_TYPE_STRING); assert_true(cbor_string_is_indefinite(handle[0].key)); assert_size_equal(cbor_string_chunk_count(handle[0].key), 2); @@ -179,10 +179,10 @@ static void test_streamed_streamed_kv_map(void **_CBOR_UNUSED(_state)) { assert_null(map); } -static void test_map_add_full(void **_CBOR_UNUSED(_state)) { +static void test_map_add_full(void** _state _CBOR_UNUSED) { map = cbor_new_definite_map(0); - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_false(cbor_map_add(map, (struct cbor_pair){.key = one, .value = two})); @@ -191,15 +191,15 @@ static void test_map_add_full(void **_CBOR_UNUSED(_state)) { cbor_decref(&two); } -static void test_map_add_too_big_to_realloc(void **_CBOR_UNUSED(_state)) { +static void test_map_add_too_big_to_realloc(void** _state _CBOR_UNUSED) { map = cbor_new_indefinite_map(); - struct _cbor_map_metadata *metadata = - (struct _cbor_map_metadata *)&map->metadata; + struct _cbor_map_metadata* metadata = + (struct _cbor_map_metadata*)&map->metadata; // Pretend we already have a huge memory block metadata->allocated = SIZE_MAX; metadata->end_ptr = SIZE_MAX; - cbor_item_t *one = cbor_build_uint8(1); - cbor_item_t *two = cbor_build_uint8(2); + cbor_item_t* one = cbor_build_uint8(1); + cbor_item_t* two = cbor_build_uint8(2); assert_false(cbor_map_add(map, (struct cbor_pair){.key = one, .value = two})); @@ -210,7 +210,7 @@ static void test_map_add_too_big_to_realloc(void **_CBOR_UNUSED(_state)) { cbor_decref(&two); } -static void test_map_creation(void **_CBOR_UNUSED(_state)) { +static void test_map_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_definite_map(42)); }); WITH_MOCK_MALLOC({ assert_null(cbor_new_definite_map(42)); }, 2, MALLOC, MALLOC_FAIL); @@ -218,12 +218,12 @@ 
static void test_map_creation(void **_CBOR_UNUSED(_state)) { WITH_FAILING_MALLOC({ assert_null(cbor_new_indefinite_map()); }); } -static void test_map_add(void **_CBOR_UNUSED(_state)) { +static void test_map_add(void** _state _CBOR_UNUSED) { WITH_MOCK_MALLOC( { - cbor_item_t *map = cbor_new_indefinite_map(); - cbor_item_t *key = cbor_build_uint8(0); - cbor_item_t *value = cbor_build_bool(true); + cbor_item_t* map = cbor_new_indefinite_map(); + cbor_item_t* key = cbor_build_uint8(0); + cbor_item_t* value = cbor_build_bool(true); assert_false( cbor_map_add(map, (struct cbor_pair){.key = key, .value = value})); @@ -238,19 +238,32 @@ static void test_map_add(void **_CBOR_UNUSED(_state)) { } static unsigned char test_indef_map[] = {0xBF, 0x01, 0x02, 0x03, 0x04, 0xFF}; -static void test_indef_map_decode(void **_CBOR_UNUSED(_state)) { +static void test_indef_map_decode(void** _state _CBOR_UNUSED) { WITH_MOCK_MALLOC( { - cbor_item_t *map; + cbor_item_t* map; struct cbor_load_result res; map = cbor_load(test_indef_map, 6, &res); assert_null(map); - assert_size_equal(res.error.code, CBOR_ERR_MEMERROR); + assert_int_equal(res.error.code, CBOR_ERR_MEMERROR); }, 4, MALLOC, MALLOC, MALLOC, REALLOC_FAIL); } +// The value in the third pair is missing, 0xFF instead. 
+static unsigned char test_break_in_def_map[] = {0xA3, 0x30, 0x30, 0x30, + 0x30, 0x00, 0xFF}; +static void test_break_in_def_map_decode(void** _state _CBOR_UNUSED) { + cbor_item_t* map; + struct cbor_load_result res; + map = cbor_load(test_break_in_def_map, 7, &res); + + assert_null(map); + assert_int_equal(res.error.code, CBOR_ERR_SYNTAXERROR); + assert_size_equal(res.error.position, 7); +} + int main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test(test_empty_map), @@ -265,6 +278,7 @@ int main(void) { cmocka_unit_test(test_map_creation), cmocka_unit_test(test_map_add), cmocka_unit_test(test_indef_map_decode), + cmocka_unit_test(test_break_in_def_map_decode), }; return cmocka_run_group_tests(tests, NULL, NULL); } diff --git a/contrib/libcbor/test/memory_utils_test.c b/contrib/libcbor/test/memory_utils_test.c index 6cf07c7da934..e10551c7b502 100644 --- a/contrib/libcbor/test/memory_utils_test.c +++ b/contrib/libcbor/test/memory_utils_test.c @@ -9,7 +9,7 @@ #include <string.h> #include "assertions.h" -static void test_safe_multiply(void **_CBOR_UNUSED(_state)) { +static void test_safe_multiply(void** _state _CBOR_UNUSED) { assert_true(_cbor_safe_to_multiply(1, 1)); assert_true(_cbor_safe_to_multiply(SIZE_MAX, 0)); assert_true(_cbor_safe_to_multiply(SIZE_MAX, 1)); @@ -17,7 +17,7 @@ static void test_safe_multiply(void **_CBOR_UNUSED(_state)) { assert_false(_cbor_safe_to_multiply(SIZE_MAX, SIZE_MAX)); } -static void test_safe_add(void **_CBOR_UNUSED(_state)) { +static void test_safe_add(void** _state _CBOR_UNUSED) { assert_true(_cbor_safe_to_add(1, 1)); assert_true(_cbor_safe_to_add(SIZE_MAX - 1, 1)); assert_true(_cbor_safe_to_add(SIZE_MAX, 0)); @@ -28,7 +28,7 @@ static void test_safe_add(void **_CBOR_UNUSED(_state)) { assert_false(_cbor_safe_to_add(SIZE_MAX - 1, SIZE_MAX - 1)); } -static void test_safe_signalling_add(void **_CBOR_UNUSED(_state)) { +static void test_safe_signalling_add(void** _state _CBOR_UNUSED) { 
assert_size_equal(_cbor_safe_signaling_add(1, 2), 3); assert_size_equal(_cbor_safe_signaling_add(0, 1), 0); assert_size_equal(_cbor_safe_signaling_add(0, SIZE_MAX), 0); @@ -36,8 +36,8 @@ static void test_safe_signalling_add(void **_CBOR_UNUSED(_state)) { assert_size_equal(_cbor_safe_signaling_add(1, SIZE_MAX - 1), SIZE_MAX); } -static void test_realloc_multiple(void **_CBOR_UNUSED(_state)) { - unsigned char *data = malloc(1); +static void test_realloc_multiple(void** _state _CBOR_UNUSED) { + unsigned char* data = malloc(1); data[0] = 0x2a; data = _cbor_realloc_multiple(data, /*item_size=*/1, /*item_count=*/10); diff --git a/contrib/libcbor/test/negint_encoders_test.c b/contrib/libcbor/test/negint_encoders_test.c index e9230fbe42ae..f4e52c8c37d0 100644 --- a/contrib/libcbor/test/negint_encoders_test.c +++ b/contrib/libcbor/test/negint_encoders_test.c @@ -10,31 +10,31 @@ unsigned char buffer[512]; -static void test_embedded_negint8(void **_CBOR_UNUSED(_state)) { +static void test_embedded_negint8(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_negint8(14, buffer, 512)); assert_memory_equal(buffer, (unsigned char[]){0x2E}, 1); } -static void test_negint8(void **_CBOR_UNUSED(_state)) { +static void test_negint8(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_negint8(180, buffer, 1)); assert_size_equal(2, cbor_encode_negint8(255, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x38, 0xFF}), 2); } -static void test_negint16(void **_CBOR_UNUSED(_state)) { +static void test_negint16(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_negint16(1000, buffer, 2)); assert_size_equal(3, cbor_encode_negint16(1000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x39, 0x03, 0xE8}), 3); } -static void test_negint32(void **_CBOR_UNUSED(_state)) { +static void test_negint32(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_negint32(1000000, buffer, 4)); assert_size_equal(5, 
cbor_encode_negint32(1000000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x3A, 0x00, 0x0F, 0x42, 0x40}), 5); } -static void test_negint64(void **_CBOR_UNUSED(_state)) { +static void test_negint64(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_negint64(18446744073709551615ULL, buffer, 8)); assert_size_equal(9, @@ -45,7 +45,7 @@ static void test_negint64(void **_CBOR_UNUSED(_state)) { 9); } -static void test_unspecified(void **_CBOR_UNUSED(_state)) { +static void test_unspecified(void** _state _CBOR_UNUSED) { assert_size_equal(9, cbor_encode_negint(18446744073709551615ULL, buffer, 512)); assert_memory_equal( diff --git a/contrib/libcbor/test/negint_test.c b/contrib/libcbor/test/negint_test.c index 66e7445b7f23..1df7d53fd526 100644 --- a/contrib/libcbor/test/negint_test.c +++ b/contrib/libcbor/test/negint_test.c @@ -9,7 +9,7 @@ #include "cbor.h" #include "test_allocator.h" -cbor_item_t *number; +cbor_item_t* number; struct cbor_load_result res; unsigned char data1[] = {0x22, 0xFF}; @@ -19,7 +19,7 @@ unsigned char data4[] = {0x3a, 0xa5, 0xf7, 0x02, 0xb3, 0xFF}; unsigned char data5[] = {0x3b, 0xa5, 0xf7, 0x02, 0xb3, 0xa5, 0xf7, 0x02, 0xb3, 0xFF}; -static void test_very_short_int(void **_CBOR_UNUSED(_state)) { +static void test_very_short_int(void** _state _CBOR_UNUSED) { number = cbor_load(data1, 2, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_NEGINT); assert_true(cbor_int_get_width(number) == CBOR_INT_8); @@ -33,7 +33,7 @@ static void test_very_short_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_short_int(void **_CBOR_UNUSED(_state)) { +static void test_short_int(void** _state _CBOR_UNUSED) { number = cbor_load(data2, 3, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_NEGINT); assert_true(cbor_int_get_width(number) == CBOR_INT_8); @@ -47,7 +47,7 @@ static void test_short_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_half_int(void **_CBOR_UNUSED(_state)) { +static 
void test_half_int(void** _state _CBOR_UNUSED) { number = cbor_load(data3, 5, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_NEGINT); assert_true(cbor_int_get_width(number) == CBOR_INT_16); @@ -61,7 +61,7 @@ static void test_half_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_int(void **_CBOR_UNUSED(_state)) { +static void test_int(void** _state _CBOR_UNUSED) { number = cbor_load(data4, 6, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_NEGINT); assert_true(cbor_int_get_width(number) == CBOR_INT_32); @@ -75,7 +75,7 @@ static void test_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_long_int(void **_CBOR_UNUSED(_state)) { +static void test_long_int(void** _state _CBOR_UNUSED) { number = cbor_load(data5, 10, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_NEGINT); assert_true(cbor_int_get_width(number) == CBOR_INT_64); @@ -89,7 +89,7 @@ static void test_long_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_int_creation(void **_CBOR_UNUSED(_state)) { +static void test_int_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_int8()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_int16()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_int32()); }); diff --git a/contrib/libcbor/test/pretty_printer_test.c b/contrib/libcbor/test/pretty_printer_test.c index 6ea40c0e7ce2..3652ef304787 100644 --- a/contrib/libcbor/test/pretty_printer_test.c +++ b/contrib/libcbor/test/pretty_printer_test.c @@ -11,17 +11,17 @@ #include "assertions.h" #include "cbor.h" -void assert_describe_result(cbor_item_t *item, char *expected_result) { +void assert_describe_result(cbor_item_t* item, char* expected_result) { #if CBOR_PRETTY_PRINTER // We know the expected size based on `expected_result`, but read everything // in order to get the full actual output in a useful error message. 
const size_t buffer_size = 512; - FILE *outfile = tmpfile(); + FILE* outfile = tmpfile(); cbor_describe(item, outfile); rewind(outfile); // Treat string as null-terminated since cmocka doesn't have asserts // for explicit length strings. - char *output = malloc(buffer_size); + char* output = malloc(buffer_size); assert_non_null(output); size_t output_size = fread(output, sizeof(char), buffer_size, outfile); output[output_size] = '\0'; @@ -32,31 +32,31 @@ void assert_describe_result(cbor_item_t *item, char *expected_result) { #endif } -static void test_uint(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_build_uint8(42); +static void test_uint(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_build_uint8(42); assert_describe_result(item, "[CBOR_TYPE_UINT] Width: 1B, Value: 42\n"); cbor_decref(&item); } -static void test_negint(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_build_negint16(40); +static void test_negint(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_build_negint16(40); assert_describe_result(item, "[CBOR_TYPE_NEGINT] Width: 2B, Value: -40 - 1\n"); cbor_decref(&item); } -static void test_definite_bytestring(void **_CBOR_UNUSED(_state)) { +static void test_definite_bytestring(void** _state _CBOR_UNUSED) { unsigned char data[] = {0x01, 0x02, 0x03}; - cbor_item_t *item = cbor_build_bytestring(data, 3); + cbor_item_t* item = cbor_build_bytestring(data, 3); assert_describe_result(item, "[CBOR_TYPE_BYTESTRING] Definite, Length: 3B, Data:\n" " 010203\n"); cbor_decref(&item); } -static void test_indefinite_bytestring(void **_CBOR_UNUSED(_state)) { +static void test_indefinite_bytestring(void** _state _CBOR_UNUSED) { unsigned char data[] = {0x01, 0x02, 0x03}; - cbor_item_t *item = cbor_new_indefinite_bytestring(); + cbor_item_t* item = cbor_new_indefinite_bytestring(); assert_true(cbor_bytestring_add_chunk( item, cbor_move(cbor_build_bytestring(data, 3)))); assert_true(cbor_bytestring_add_chunk( @@ -71,9 +71,9 @@ static void 
test_indefinite_bytestring(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_definite_string(void **_CBOR_UNUSED(_state)) { - char *string = "Hello!"; - cbor_item_t *item = cbor_build_string(string); +static void test_definite_string(void** _state _CBOR_UNUSED) { + char* string = "Hello!"; + cbor_item_t* item = cbor_build_string(string); assert_describe_result( item, "[CBOR_TYPE_STRING] Definite, Length: 6B, Codepoints: 6, Data:\n" @@ -81,9 +81,9 @@ static void test_definite_string(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_indefinite_string(void **_CBOR_UNUSED(_state)) { - char *string = "Hello!"; - cbor_item_t *item = cbor_new_indefinite_string(); +static void test_indefinite_string(void** _state _CBOR_UNUSED) { + char* string = "Hello!"; + cbor_item_t* item = cbor_new_indefinite_string(); assert_true( cbor_string_add_chunk(item, cbor_move(cbor_build_string(string)))); assert_true( @@ -98,10 +98,10 @@ static void test_indefinite_string(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_multibyte_string(void **_CBOR_UNUSED(_state)) { +static void test_multibyte_string(void** _state _CBOR_UNUSED) { // "Štěstíčko" in UTF-8 - char *string = "\xc5\xa0t\xc4\x9bst\xc3\xad\xc4\x8dko"; - cbor_item_t *item = cbor_build_string(string); + char* string = "\xc5\xa0t\xc4\x9bst\xc3\xad\xc4\x8dko"; + cbor_item_t* item = cbor_build_string(string); assert_describe_result( item, "[CBOR_TYPE_STRING] Definite, Length: 13B, Codepoints: 9, Data:\n" @@ -109,8 +109,8 @@ static void test_multibyte_string(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_definite_array(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_array(2); +static void test_definite_array(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_array(2); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(1)))); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(2)))); 
assert_describe_result(item, @@ -120,8 +120,8 @@ static void test_definite_array(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_indefinite_array(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_array(); +static void test_indefinite_array(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_array(); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(1)))); assert_true(cbor_array_push(item, cbor_move(cbor_build_uint8(2)))); assert_describe_result(item, @@ -131,8 +131,8 @@ static void test_indefinite_array(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_definite_map(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_definite_map(1); +static void test_definite_map(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_definite_map(1); assert_true(cbor_map_add( item, (struct cbor_pair){.key = cbor_move(cbor_build_uint8(1)), .value = cbor_move(cbor_build_uint8(2))})); @@ -144,8 +144,8 @@ static void test_definite_map(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_indefinite_map(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_new_indefinite_map(); +static void test_indefinite_map(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_map(); assert_true(cbor_map_add( item, (struct cbor_pair){.key = cbor_move(cbor_build_uint8(1)), .value = cbor_move(cbor_build_uint8(2))})); @@ -157,16 +157,16 @@ static void test_indefinite_map(void **_CBOR_UNUSED(_state)) { cbor_decref(&item); } -static void test_tag(void **_CBOR_UNUSED(_state)) { - cbor_item_t *item = cbor_build_tag(42, cbor_move(cbor_build_uint8(1))); +static void test_tag(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_build_tag(42, cbor_move(cbor_build_uint8(1))); assert_describe_result(item, "[CBOR_TYPE_TAG] Value: 42\n" " [CBOR_TYPE_UINT] Width: 1B, Value: 1\n"); cbor_decref(&item); } -static void test_floats(void **_CBOR_UNUSED(_state)) { - 
cbor_item_t *item = cbor_new_indefinite_array(); +static void test_floats(void** _state _CBOR_UNUSED) { + cbor_item_t* item = cbor_new_indefinite_array(); assert_true(cbor_array_push(item, cbor_move(cbor_build_bool(true)))); assert_true( cbor_array_push(item, cbor_move(cbor_build_ctrl(CBOR_CTRL_UNDEF)))); diff --git a/contrib/libcbor/test/stack_over_limit_test.c b/contrib/libcbor/test/stack_over_limit_test.c index 73b6bdc2567b..fef2c6dd018e 100644 --- a/contrib/libcbor/test/stack_over_limit_test.c +++ b/contrib/libcbor/test/stack_over_limit_test.c @@ -1,9 +1,9 @@ #include "assertions.h" #include "cbor.h" -static size_t generate_overflow_data(unsigned char **overflow_data) { +static size_t generate_overflow_data(unsigned char** overflow_data) { int i; - *overflow_data = (unsigned char *)malloc(CBOR_MAX_STACK_SIZE + 3); + *overflow_data = (unsigned char*)malloc(CBOR_MAX_STACK_SIZE + 3); for (i = 0; i < CBOR_MAX_STACK_SIZE + 1; i++) { (*overflow_data)[i] = 0xC2; // tag of positive bignum } @@ -12,8 +12,8 @@ static size_t generate_overflow_data(unsigned char **overflow_data) { return CBOR_MAX_STACK_SIZE + 3; } -static void test_stack_over_limit(void **_CBOR_UNUSED(_state)) { - unsigned char *overflow_data; +static void test_stack_over_limit(void** _state _CBOR_UNUSED) { + unsigned char* overflow_data; size_t overflow_data_len; struct cbor_load_result res; overflow_data_len = generate_overflow_data(&overflow_data); diff --git a/contrib/libcbor/test/stream_expectations.c b/contrib/libcbor/test/stream_expectations.c index 3592c9625da5..2a1b512f36c6 100644 --- a/contrib/libcbor/test/stream_expectations.c +++ b/contrib/libcbor/test/stream_expectations.c @@ -4,7 +4,7 @@ struct test_assertion assertions_queue[MAX_QUEUE_ITEMS]; int queue_size = 0; int current_expectation = 0; -int clean_up_stream_assertions(void **state) { +int clean_up_stream_assertions(void** state) { if (queue_size != current_expectation) { return 1; // We have not matched all expectations correctly } @@ 
-25,7 +25,7 @@ void assert_uint8_eq(uint8_t actual) { UINT8_EQ, (union test_expectation_data){.int8 = actual}}; } -void uint8_callback(void *_CBOR_UNUSED(_context), uint8_t actual) { +void uint8_callback(void* _context _CBOR_UNUSED, uint8_t actual) { assert_true(current().expectation == UINT8_EQ); assert_true(current().data.int8 == actual); current_expectation++; @@ -36,7 +36,7 @@ void assert_uint16_eq(uint16_t actual) { UINT16_EQ, (union test_expectation_data){.int16 = actual}}; } -void uint16_callback(void *_CBOR_UNUSED(_context), uint16_t actual) { +void uint16_callback(void* _context _CBOR_UNUSED, uint16_t actual) { assert_true(current().expectation == UINT16_EQ); assert_true(current().data.int16 == actual); current_expectation++; @@ -47,7 +47,7 @@ void assert_uint32_eq(uint32_t actual) { UINT32_EQ, (union test_expectation_data){.int32 = actual}}; } -void uint32_callback(void *_CBOR_UNUSED(_context), uint32_t actual) { +void uint32_callback(void* _context _CBOR_UNUSED, uint32_t actual) { assert_true(current().expectation == UINT32_EQ); assert_true(current().data.int32 == actual); current_expectation++; @@ -58,7 +58,7 @@ void assert_uint64_eq(uint64_t actual) { UINT64_EQ, (union test_expectation_data){.int64 = actual}}; } -void uint64_callback(void *_CBOR_UNUSED(_context), uint64_t actual) { +void uint64_callback(void* _context _CBOR_UNUSED, uint64_t actual) { assert_true(current().expectation == UINT64_EQ); assert_true(current().data.int64 == actual); current_expectation++; @@ -69,7 +69,7 @@ void assert_negint8_eq(uint8_t actual) { NEGINT8_EQ, (union test_expectation_data){.int8 = actual}}; } -void negint8_callback(void *_CBOR_UNUSED(_context), uint8_t actual) { +void negint8_callback(void* _context _CBOR_UNUSED, uint8_t actual) { assert_true(current().expectation == NEGINT8_EQ); assert_true(current().data.int8 == actual); current_expectation++; @@ -80,7 +80,7 @@ void assert_negint16_eq(uint16_t actual) { NEGINT16_EQ, (union test_expectation_data){.int16 = 
actual}}; } -void negint16_callback(void *_CBOR_UNUSED(_context), uint16_t actual) { +void negint16_callback(void* _context _CBOR_UNUSED, uint16_t actual) { assert_true(current().expectation == NEGINT16_EQ); assert_true(current().data.int16 == actual); current_expectation++; @@ -91,7 +91,7 @@ void assert_negint32_eq(uint32_t actual) { NEGINT32_EQ, (union test_expectation_data){.int32 = actual}}; } -void negint32_callback(void *_CBOR_UNUSED(_context), uint32_t actual) { +void negint32_callback(void* _context _CBOR_UNUSED, uint32_t actual) { assert_true(current().expectation == NEGINT32_EQ); assert_true(current().data.int32 == actual); current_expectation++; @@ -102,7 +102,7 @@ void assert_negint64_eq(uint64_t actual) { NEGINT64_EQ, (union test_expectation_data){.int64 = actual}}; } -void negint64_callback(void *_CBOR_UNUSED(_context), uint64_t actual) { +void negint64_callback(void* _context _CBOR_UNUSED, uint64_t actual) { assert_true(current().expectation == NEGINT64_EQ); assert_true(current().data.int64 == actual); current_expectation++; @@ -114,7 +114,7 @@ void assert_bstring_mem_eq(cbor_data address, size_t length) { (union test_expectation_data){.string = {address, length}}}; } -void byte_string_callback(void *_CBOR_UNUSED(_context), cbor_data address, +void byte_string_callback(void* _context _CBOR_UNUSED, cbor_data address, uint64_t length) { assert_true(current().expectation == BSTRING_MEM_EQ); assert_true(current().data.string.address == address); @@ -127,7 +127,7 @@ void assert_bstring_indef_start(void) { (struct test_assertion){.expectation = BSTRING_INDEF_START}; } -void byte_string_start_callback(void *_CBOR_UNUSED(_context)) { +void byte_string_start_callback(void* _context _CBOR_UNUSED) { assert_true(current().expectation == BSTRING_INDEF_START); current_expectation++; } @@ -138,7 +138,7 @@ void assert_string_mem_eq(cbor_data address, size_t length) { (union test_expectation_data){.string = {address, length}}}; } -void string_callback(void 
*_CBOR_UNUSED(_context), cbor_data address, +void string_callback(void* _context _CBOR_UNUSED, cbor_data address, uint64_t length) { assert_true(current().expectation == STRING_MEM_EQ); assert_true(current().data.string.address == address); @@ -151,7 +151,7 @@ void assert_string_indef_start(void) { (struct test_assertion){.expectation = STRING_INDEF_START}; } -void string_start_callback(void *_CBOR_UNUSED(_context)) { +void string_start_callback(void* _context _CBOR_UNUSED) { assert_true(current().expectation == STRING_INDEF_START); current_expectation++; } @@ -161,7 +161,7 @@ void assert_indef_break(void) { (struct test_assertion){.expectation = INDEF_BREAK}; } -void indef_break_callback(void *_CBOR_UNUSED(_context)) { +void indef_break_callback(void* _context _CBOR_UNUSED) { assert_true(current().expectation == INDEF_BREAK); current_expectation++; } @@ -171,7 +171,7 @@ void assert_array_start(size_t length) { (struct test_assertion){ARRAY_START, {.length = length}}; } -void array_start_callback(void *_CBOR_UNUSED(_context), uint64_t length) { +void array_start_callback(void* _context _CBOR_UNUSED, uint64_t length) { assert_true(current().expectation == ARRAY_START); assert_true(current().data.length == length); current_expectation++; @@ -182,7 +182,7 @@ void assert_indef_array_start(void) { (struct test_assertion){.expectation = ARRAY_INDEF_START}; } -void indef_array_start_callback(void *_CBOR_UNUSED(_context)) { +void indef_array_start_callback(void* _context _CBOR_UNUSED) { assert_true(current().expectation == ARRAY_INDEF_START); current_expectation++; } @@ -192,7 +192,7 @@ void assert_map_start(size_t length) { (struct test_assertion){MAP_START, {.length = length}}; } -void map_start_callback(void *_CBOR_UNUSED(_context), uint64_t length) { +void map_start_callback(void* _context _CBOR_UNUSED, uint64_t length) { assert_true(current().expectation == MAP_START); assert_true(current().data.length == length); current_expectation++; @@ -203,7 +203,7 @@ void 
assert_indef_map_start(void) { (struct test_assertion){.expectation = MAP_INDEF_START}; } -void indef_map_start_callback(void *_CBOR_UNUSED(_context)) { +void indef_map_start_callback(void* _context _CBOR_UNUSED) { assert_true(current().expectation == MAP_INDEF_START); current_expectation++; } @@ -213,7 +213,7 @@ void assert_tag_eq(uint64_t value) { (struct test_assertion){TAG_EQ, {.int64 = value}}; } -void tag_callback(void *_CBOR_UNUSED(_context), uint64_t value) { +void tag_callback(void* _context _CBOR_UNUSED, uint64_t value) { assert_true(current().expectation == TAG_EQ); assert_true(current().data.int64 == value); current_expectation++; @@ -224,7 +224,7 @@ void assert_half(float value) { (struct test_assertion){HALF_EQ, {.float2 = value}}; } -void half_callback(void *_CBOR_UNUSED(_context), float actual) { +void half_callback(void* _context _CBOR_UNUSED, float actual) { assert_true(current().expectation == HALF_EQ); assert_true(current().data.float2 == actual); current_expectation++; @@ -235,7 +235,7 @@ void assert_float(float value) { (struct test_assertion){FLOAT_EQ, {.float4 = value}}; } -void float_callback(void *_CBOR_UNUSED(_context), float actual) { +void float_callback(void* _context _CBOR_UNUSED, float actual) { assert_true(current().expectation == FLOAT_EQ); assert_true(current().data.float4 == actual); current_expectation++; @@ -246,7 +246,7 @@ void assert_double(double value) { (struct test_assertion){DOUBLE_EQ, {.float8 = value}}; } -void double_callback(void *_CBOR_UNUSED(_context), double actual) { +void double_callback(void* _context _CBOR_UNUSED, double actual) { assert_true(current().expectation == DOUBLE_EQ); assert_true(current().data.float8 == actual); current_expectation++; @@ -266,18 +266,18 @@ void assert_undef(void) { (struct test_assertion){.expectation = UNDEF}; } -void bool_callback(void *_CBOR_UNUSED(_context), bool actual) { +void bool_callback(void* _context _CBOR_UNUSED, bool actual) { assert_true(current().expectation == 
BOOL_EQ); assert_true(current().data.boolean == actual); current_expectation++; } -void null_callback(void *_CBOR_UNUSED(_context)) { +void null_callback(void* _context _CBOR_UNUSED) { assert_true(current().expectation == NIL); current_expectation++; } -void undef_callback(void *_CBOR_UNUSED(_context)) { +void undef_callback(void* _context _CBOR_UNUSED) { assert_true(current().expectation == UNDEF); current_expectation++; } diff --git a/contrib/libcbor/test/stream_expectations.h b/contrib/libcbor/test/stream_expectations.h index bfc58e97cd9f..f67d025f9eaa 100644 --- a/contrib/libcbor/test/stream_expectations.h +++ b/contrib/libcbor/test/stream_expectations.h @@ -78,7 +78,7 @@ struct test_assertion { struct cbor_decoder_result decode(cbor_data, size_t); /* Verify all assertions were applied and clean up */ -int clean_up_stream_assertions(void **); +int clean_up_stream_assertions(void**); /* Assertions builders */ void assert_uint8_eq(uint8_t); @@ -116,37 +116,37 @@ void assert_undef(void); void assert_indef_break(void); /* Assertions verifying callbacks */ -void uint8_callback(void *, uint8_t); -void uint16_callback(void *, uint16_t); -void uint32_callback(void *, uint32_t); -void uint64_callback(void *, uint64_t); +void uint8_callback(void*, uint8_t); +void uint16_callback(void*, uint16_t); +void uint32_callback(void*, uint32_t); +void uint64_callback(void*, uint64_t); -void negint8_callback(void *, uint8_t); -void negint16_callback(void *, uint16_t); -void negint32_callback(void *, uint32_t); -void negint64_callback(void *, uint64_t); +void negint8_callback(void*, uint8_t); +void negint16_callback(void*, uint16_t); +void negint32_callback(void*, uint32_t); +void negint64_callback(void*, uint64_t); -void byte_string_callback(void *, cbor_data, uint64_t); -void byte_string_start_callback(void *); +void byte_string_callback(void*, cbor_data, uint64_t); +void byte_string_start_callback(void*); -void string_callback(void *, cbor_data, uint64_t); -void 
string_start_callback(void *); +void string_callback(void*, cbor_data, uint64_t); +void string_start_callback(void*); -void array_start_callback(void *, uint64_t); -void indef_array_start_callback(void *); +void array_start_callback(void*, uint64_t); +void indef_array_start_callback(void*); -void map_start_callback(void *, uint64_t); -void indef_map_start_callback(void *); +void map_start_callback(void*, uint64_t); +void indef_map_start_callback(void*); -void tag_callback(void *, uint64_t); +void tag_callback(void*, uint64_t); -void half_callback(void *, float); -void float_callback(void *, float); -void double_callback(void *, double); -void indef_break_callback(void *); +void half_callback(void*, float); +void float_callback(void*, float); +void double_callback(void*, double); +void indef_break_callback(void*); -void bool_callback(void *, bool); -void null_callback(void *); -void undef_callback(void *); +void bool_callback(void*, bool); +void null_callback(void*); +void undef_callback(void*); #endif diff --git a/contrib/libcbor/test/string_encoders_test.c b/contrib/libcbor/test/string_encoders_test.c index 6de1cfb27eb3..2c00ff991090 100644 --- a/contrib/libcbor/test/string_encoders_test.c +++ b/contrib/libcbor/test/string_encoders_test.c @@ -10,18 +10,18 @@ unsigned char buffer[512]; -static void test_embedded_string_start(void **_CBOR_UNUSED(_state)) { +static void test_embedded_string_start(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_string_start(1, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x61}), 1); } -static void test_string_start(void **_CBOR_UNUSED(_state)) { +static void test_string_start(void** _state _CBOR_UNUSED) { assert_size_equal(5, cbor_encode_string_start(1000000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x7A, 0x00, 0x0F, 0x42, 0x40}), 5); } -static void test_indef_string_start(void **_CBOR_UNUSED(_state)) { +static void test_indef_string_start(void** _state _CBOR_UNUSED) { 
assert_size_equal(1, cbor_encode_indef_string_start(buffer, 512)); assert_size_equal(0, cbor_encode_indef_string_start(buffer, 0)); assert_memory_equal(buffer, ((unsigned char[]){0x7F}), 1); diff --git a/contrib/libcbor/test/string_test.c b/contrib/libcbor/test/string_test.c index c3079b449838..9ce3bdfb7d17 100644 --- a/contrib/libcbor/test/string_test.c +++ b/contrib/libcbor/test/string_test.c @@ -10,12 +10,12 @@ #include "cbor.h" #include "test_allocator.h" -cbor_item_t *string; +cbor_item_t* string; struct cbor_load_result res; unsigned char empty_string_data[] = {0x60}; -static void test_empty_string(void **_CBOR_UNUSED(_state)) { +static void test_empty_string(void** _state _CBOR_UNUSED) { string = cbor_load(empty_string_data, 1, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -31,7 +31,7 @@ unsigned char short_string_data[] = {0x6C, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0x77, 0x6F, 0x72, 0x6C, 0x64, 0x21}; /* 0x60 + 12 | Hello world! */ -static void test_short_string(void **_CBOR_UNUSED(_state)) { +static void test_short_string(void** _state _CBOR_UNUSED) { string = cbor_load(short_string_data, 13, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -49,7 +49,7 @@ unsigned char short_multibyte_string_data[] = { 0xC3, 0x9F, 0x76, 0xC4, 0x9B, 0x74, 0x65, 0x21}; /* 0x60 + 15 | Čaues ßvěte! 
*/ -static void test_short_multibyte_string(void **_CBOR_UNUSED(_state)) { +static void test_short_multibyte_string(void** _state _CBOR_UNUSED) { string = cbor_load(short_multibyte_string_data, 16, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -78,7 +78,7 @@ unsigned char int8_string_data[] = { 0x70, 0x6F, 0x73, 0x75, 0x65, 0x72, 0x65, 0x2E}; /* 150 | Lorem ....*/ -static void test_int8_string(void **_CBOR_UNUSED(_state)) { +static void test_int8_string(void** _state _CBOR_UNUSED) { string = cbor_load(int8_string_data, 152, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -112,7 +112,7 @@ unsigned char int16_string_data[] = { /* 150 | Lorem ....*/ /* This valid but not realistic - length 150 could be encoded in a single * uint8_t (but we need to keep the test files reasonably compact) */ -static void test_int16_string(void **_CBOR_UNUSED(_state)) { +static void test_int16_string(void** _state _CBOR_UNUSED) { string = cbor_load(int16_string_data, 153, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -145,7 +145,7 @@ unsigned char int32_string_data[] = { 0x74, 0x6F, 0x20, 0x70, 0x6F, 0x73, 0x75, 0x65, 0x72, 0x65, 0x2E}; /* 150 | Lorem ....*/ -static void test_int32_string(void **_CBOR_UNUSED(_state)) { +static void test_int32_string(void** _state _CBOR_UNUSED) { string = cbor_load(int32_string_data, 155, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -179,7 +179,7 @@ unsigned char int64_string_data[] = { 0x72, 0x65, 0x2E}; /* 150 | Lorem ....*/ -static void test_int64_string(void **_CBOR_UNUSED(_state)) { +static void test_int64_string(void** _state _CBOR_UNUSED) { string = cbor_load(int64_string_data, 159, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -201,7 +201,7 @@ unsigned char short_indef_string_data[] = {0x7F, 0x78, 0x01, 0x65, 0xFF, 0xFF}; /* start | 
string | break| extra */ -static void test_short_indef_string(void **_CBOR_UNUSED(_state)) { +static void test_short_indef_string(void** _state _CBOR_UNUSED) { string = cbor_load(short_indef_string_data, 6, &res); assert_non_null(string); assert_true(cbor_typeof(string) == CBOR_TYPE_STRING); @@ -217,7 +217,7 @@ static void test_short_indef_string(void **_CBOR_UNUSED(_state)) { assert_null(string); } -static void test_invalid_utf(void **_CBOR_UNUSED(_state)) { +static void test_invalid_utf(void** _state _CBOR_UNUSED) { /* 0x60 + 1 | 0xC5 (invalid unfinished 2B codepoint) */ unsigned char string_data[] = {0x61, 0xC5}; string = cbor_load(string_data, 2, &res); @@ -233,13 +233,13 @@ static void test_invalid_utf(void **_CBOR_UNUSED(_state)) { cbor_decref(&string); } -static void test_inline_creation(void **_CBOR_UNUSED(_state)) { +static void test_inline_creation(void** _state _CBOR_UNUSED) { string = cbor_build_string("Hello!"); assert_memory_equal(cbor_string_handle(string), "Hello!", strlen("Hello!")); cbor_decref(&string); } -static void test_string_creation(void **_CBOR_UNUSED(_state)) { +static void test_string_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_definite_string()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_indefinite_string()); }); @@ -255,17 +255,17 @@ static void test_string_creation(void **_CBOR_UNUSED(_state)) { MALLOC_FAIL); } -static void test_string_add_chunk(void **_CBOR_UNUSED(_state)) { +static void test_string_add_chunk(void** _state _CBOR_UNUSED) { WITH_MOCK_MALLOC( { - cbor_item_t *string = cbor_new_indefinite_string(); - cbor_item_t *chunk = cbor_build_string("Hello!"); + cbor_item_t* string = cbor_new_indefinite_string(); + cbor_item_t* chunk = cbor_build_string("Hello!"); assert_false(cbor_string_add_chunk(string, chunk)); assert_size_equal(cbor_string_chunk_count(string), 0); - assert_size_equal(((struct cbor_indefinite_string_data *)string->data) - ->chunk_capacity, - 0); + assert_size_equal( + 
((struct cbor_indefinite_string_data*)string->data)->chunk_capacity, + 0); cbor_decref(&chunk); cbor_decref(&string); @@ -273,11 +273,11 @@ static void test_string_add_chunk(void **_CBOR_UNUSED(_state)) { 5, MALLOC, MALLOC, MALLOC, MALLOC, REALLOC_FAIL); } -static void test_add_chunk_reallocation_overflow(void **_CBOR_UNUSED(_state)) { +static void test_add_chunk_reallocation_overflow(void** _state _CBOR_UNUSED) { string = cbor_new_indefinite_string(); - cbor_item_t *chunk = cbor_build_string("Hello!"); - struct cbor_indefinite_string_data *metadata = - (struct cbor_indefinite_string_data *)string->data; + cbor_item_t* chunk = cbor_build_string("Hello!"); + struct cbor_indefinite_string_data* metadata = + (struct cbor_indefinite_string_data*)string->data; // Pretend we already have many chunks allocated metadata->chunk_count = SIZE_MAX; metadata->chunk_capacity = SIZE_MAX; @@ -291,10 +291,10 @@ static void test_add_chunk_reallocation_overflow(void **_CBOR_UNUSED(_state)) { cbor_decref(&string); } -static void test_set_handle(void **_CBOR_UNUSED(_state)) { +static void test_set_handle(void** _state _CBOR_UNUSED) { string = cbor_new_definite_string(); - char *test_string = "Hello"; - unsigned char *string_data = malloc(strlen(test_string)); + char* test_string = "Hello"; + unsigned char* string_data = malloc(strlen(test_string)); memcpy(string_data, test_string, strlen(test_string)); assert_ptr_not_equal(string_data, NULL); cbor_string_set_handle(string, string_data, strlen(test_string)); @@ -306,11 +306,11 @@ static void test_set_handle(void **_CBOR_UNUSED(_state)) { cbor_decref(&string); } -static void test_set_handle_multibyte_codepoint(void **_CBOR_UNUSED(_state)) { +static void test_set_handle_multibyte_codepoint(void** _state _CBOR_UNUSED) { string = cbor_new_definite_string(); // "Štěstíčko" in UTF-8 - char *test_string = "\xc5\xa0t\xc4\x9bst\xc3\xad\xc4\x8dko"; - unsigned char *string_data = malloc(strlen(test_string)); + char* test_string = 
"\xc5\xa0t\xc4\x9bst\xc3\xad\xc4\x8dko"; + unsigned char* string_data = malloc(strlen(test_string)); memcpy(string_data, test_string, strlen(test_string)); assert_ptr_not_equal(string_data, NULL); cbor_string_set_handle(string, string_data, strlen(test_string)); @@ -322,11 +322,11 @@ static void test_set_handle_multibyte_codepoint(void **_CBOR_UNUSED(_state)) { cbor_decref(&string); } -static void test_set_handle_invalid_utf(void **_CBOR_UNUSED(_state)) { +static void test_set_handle_invalid_utf(void** _state _CBOR_UNUSED) { string = cbor_new_definite_string(); // Invalid multi-byte character (missing the second byte). - char *test_string = "Test: \xc5"; - unsigned char *string_data = malloc(strlen(test_string)); + char* test_string = "Test: \xc5"; + unsigned char* string_data = malloc(strlen(test_string)); memcpy(string_data, test_string, strlen(test_string)); assert_ptr_not_equal(string_data, NULL); cbor_string_set_handle(string, string_data, strlen(test_string)); diff --git a/contrib/libcbor/test/tag_encoders_test.c b/contrib/libcbor/test/tag_encoders_test.c index 5962dd9d8938..06f87475392d 100644 --- a/contrib/libcbor/test/tag_encoders_test.c +++ b/contrib/libcbor/test/tag_encoders_test.c @@ -10,12 +10,12 @@ unsigned char buffer[512]; -static void test_embedded_tag(void **_CBOR_UNUSED(_state)) { +static void test_embedded_tag(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_tag(1, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xC1}), 1); } -static void test_tag(void **_CBOR_UNUSED(_state)) { +static void test_tag(void** _state _CBOR_UNUSED) { assert_size_equal(5, cbor_encode_tag(1000000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0xDA, 0x00, 0x0F, 0x42, 0x40}), 5); diff --git a/contrib/libcbor/test/tag_test.c b/contrib/libcbor/test/tag_test.c index 4bce10589803..293953cb4d9b 100644 --- a/contrib/libcbor/test/tag_test.c +++ b/contrib/libcbor/test/tag_test.c @@ -9,15 +9,15 @@ #include "cbor.h" #include 
"test_allocator.h" -cbor_item_t *tag; +cbor_item_t* tag; struct cbor_load_result res; unsigned char embedded_tag_data[] = {0xC0, 0x00}; -static void test_refcounting(void **_CBOR_UNUSED(_state)) { +static void test_refcounting(void** _state _CBOR_UNUSED) { tag = cbor_load(embedded_tag_data, 2, &res); assert_true(cbor_refcount(tag) == 1); - cbor_item_t *item = cbor_tag_item(tag); + cbor_item_t* item = cbor_tag_item(tag); assert_true(cbor_refcount(item) == 2); cbor_decref(&tag); assert_null(tag); @@ -27,7 +27,7 @@ static void test_refcounting(void **_CBOR_UNUSED(_state)) { } /* Tag 0 + uint 0 */ -static void test_embedded_tag(void **_CBOR_UNUSED(_state)) { +static void test_embedded_tag(void** _state _CBOR_UNUSED) { tag = cbor_load(embedded_tag_data, 2, &res); assert_true(cbor_typeof(tag) == CBOR_TYPE_TAG); assert_true(cbor_tag_value(tag) == 0); @@ -39,7 +39,7 @@ static void test_embedded_tag(void **_CBOR_UNUSED(_state)) { unsigned char int8_tag_data[] = {0xD8, 0xFF, 0x01}; /* Tag 255 + uint 1 */ -static void test_int8_tag(void **_CBOR_UNUSED(_state)) { +static void test_int8_tag(void** _state _CBOR_UNUSED) { tag = cbor_load(int8_tag_data, 3, &res); assert_true(cbor_typeof(tag) == CBOR_TYPE_TAG); assert_true(cbor_tag_value(tag) == 255); @@ -51,7 +51,7 @@ static void test_int8_tag(void **_CBOR_UNUSED(_state)) { unsigned char int16_tag_data[] = {0xD9, 0xFF, 0x00, 0x02}; /* Tag 255 << 8 + uint 2 */ -static void test_int16_tag(void **_CBOR_UNUSED(_state)) { +static void test_int16_tag(void** _state _CBOR_UNUSED) { tag = cbor_load(int16_tag_data, 4, &res); assert_true(cbor_typeof(tag) == CBOR_TYPE_TAG); assert_true(cbor_tag_value(tag) == 255 << 8); @@ -63,7 +63,7 @@ static void test_int16_tag(void **_CBOR_UNUSED(_state)) { unsigned char int32_tag_data[] = {0xDA, 0xFF, 0x00, 0x00, 0x00, 0x03}; /* uint 3 */ -static void test_int32_tag(void **_CBOR_UNUSED(_state)) { +static void test_int32_tag(void** _state _CBOR_UNUSED) { tag = cbor_load(int32_tag_data, 6, &res); 
assert_true(cbor_typeof(tag) == CBOR_TYPE_TAG); assert_true(cbor_tag_value(tag) == 4278190080ULL); @@ -76,7 +76,7 @@ unsigned char int64_tag_data[] = {0xDB, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04}; /* uint 4 */ -static void test_int64_tag(void **_CBOR_UNUSED(_state)) { +static void test_int64_tag(void** _state _CBOR_UNUSED) { tag = cbor_load(int64_tag_data, 10, &res); assert_true(cbor_typeof(tag) == CBOR_TYPE_TAG); assert_true(cbor_tag_value(tag) == 18374686479671623680ULL); @@ -88,11 +88,11 @@ static void test_int64_tag(void **_CBOR_UNUSED(_state)) { unsigned char nested_tag_data[] = {0xC0, 0xC1, 0x18, 0x2A}; /* Tag 0, tag 1 + uint 0 */ -static void test_nested_tag(void **_CBOR_UNUSED(_state)) { +static void test_nested_tag(void** _state _CBOR_UNUSED) { tag = cbor_load(nested_tag_data, 4, &res); assert_true(cbor_typeof(tag) == CBOR_TYPE_TAG); assert_true(cbor_tag_value(tag) == 0); - cbor_item_t *nested_tag = cbor_tag_item(tag); + cbor_item_t* nested_tag = cbor_tag_item(tag); assert_true(cbor_typeof(nested_tag) == CBOR_TYPE_TAG); assert_true(cbor_tag_value(nested_tag) == 1); assert_uint8(cbor_move(cbor_tag_item(nested_tag)), 42); @@ -102,13 +102,13 @@ static void test_nested_tag(void **_CBOR_UNUSED(_state)) { assert_null(nested_tag); } -static void test_all_tag_values_supported(void **_CBOR_UNUSED(_state)) { +static void test_all_tag_values_supported(void** _state _CBOR_UNUSED) { /* Test all items in the protected range of * https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml */ - for (int64_t tag_value = 0; tag_value <= 32767; tag_value++) { - cbor_item_t *tag_item = + for (uint64_t tag_value = 0; tag_value <= 32767; tag_value++) { + cbor_item_t* tag_item = cbor_build_tag(tag_value, cbor_move(cbor_build_uint8(42))); - unsigned char *serialized_tag; + unsigned char* serialized_tag; size_t serialized_tag_size = cbor_serialize_alloc(tag_item, &serialized_tag, NULL); assert_true(serialized_tag_size > 0); @@ -124,7 +124,7 @@ static void 
test_all_tag_values_supported(void **_CBOR_UNUSED(_state)) { } } -static void test_build_tag(void **_CBOR_UNUSED(_state)) { +static void test_build_tag(void** _state _CBOR_UNUSED) { tag = cbor_build_tag(1, cbor_move(cbor_build_uint8(42))); assert_true(cbor_typeof(tag) == CBOR_TYPE_TAG); @@ -134,8 +134,8 @@ static void test_build_tag(void **_CBOR_UNUSED(_state)) { cbor_decref(&tag); } -static void test_build_tag_failure(void **_CBOR_UNUSED(_state)) { - cbor_item_t *tagged_item = cbor_build_uint8(42); +static void test_build_tag_failure(void** _state _CBOR_UNUSED) { + cbor_item_t* tagged_item = cbor_build_uint8(42); WITH_FAILING_MALLOC({ assert_null(cbor_build_tag(1, tagged_item)); }); assert_size_equal(cbor_refcount(tagged_item), 1); @@ -143,7 +143,7 @@ static void test_build_tag_failure(void **_CBOR_UNUSED(_state)) { cbor_decref(&tagged_item); } -static void test_tag_creation(void **_CBOR_UNUSED(_state)) { +static void test_tag_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_tag(42)); }); } diff --git a/contrib/libcbor/test/test_allocator.c b/contrib/libcbor/test/test_allocator.c index a2f98efa22f7..b941c79a4181 100644 --- a/contrib/libcbor/test/test_allocator.c +++ b/contrib/libcbor/test/test_allocator.c @@ -8,8 +8,8 @@ int alloc_calls_expected; // How many alloc calls we got int alloc_calls; -// Array of booleans indicating whether to return a block or fail with NULL -call_expectation *expectations; +// Array of expected call and their behavior (success or failure) +call_expectation* expectations; void set_mock_malloc(int calls, ...) 
{ va_list args; @@ -31,9 +31,9 @@ void finalize_mock_malloc(void) { void print_backtrace(void) { #if HAS_EXECINFO - void *buffer[128]; + void* buffer[128]; int frames = backtrace(buffer, 128); - char **symbols = backtrace_symbols(buffer, frames); + char** symbols = backtrace_symbols(buffer, frames); // Skip this function and the caller for (int i = 2; i < frames; ++i) { printf("%s\n", symbols[i]); @@ -42,7 +42,7 @@ void print_backtrace(void) { #endif } -void *instrumented_malloc(size_t size) { +void* instrumented_malloc(size_t size) { if (alloc_calls >= alloc_calls_expected) { goto error; } @@ -59,13 +59,13 @@ error: print_error( "Unexpected call to malloc(%zu) at position %d of %d; expected %d\n", size, alloc_calls, alloc_calls_expected, - alloc_calls < alloc_calls_expected ? expectations[alloc_calls] : -1); + alloc_calls < alloc_calls_expected ? (int)expectations[alloc_calls] : -1); print_backtrace(); fail(); return NULL; } -void *instrumented_realloc(void *ptr, size_t size) { +void* instrumented_realloc(void* ptr, size_t size) { if (alloc_calls >= alloc_calls_expected) { goto error; } @@ -82,7 +82,7 @@ error: print_error( "Unexpected call to realloc(%zu) at position %d of %d; expected %d\n", size, alloc_calls, alloc_calls_expected, - alloc_calls < alloc_calls_expected ? expectations[alloc_calls] : -1); + alloc_calls < alloc_calls_expected ? 
(int)expectations[alloc_calls] : -1); print_backtrace(); fail(); return NULL; diff --git a/contrib/libcbor/test/test_allocator.h b/contrib/libcbor/test/test_allocator.h index 0e58454edbd6..09e5791b81d9 100644 --- a/contrib/libcbor/test/test_allocator.h +++ b/contrib/libcbor/test/test_allocator.h @@ -7,9 +7,13 @@ // Harness for mocking `malloc` and `realloc` typedef enum call_expectation { + // Call malloc and return a pointer MALLOC, + // Pretend call malloc, but return NULL (fail) MALLOC_FAIL, + // Call realloc and return a pointer REALLOC, + // Pretend call realloc, but return NULL (fail) REALLOC_FAIL } call_expectation; @@ -17,9 +21,9 @@ void set_mock_malloc(int calls, ...); void finalize_mock_malloc(void); -void *instrumented_malloc(size_t size); +void* instrumented_malloc(size_t size); -void *instrumented_realloc(void *ptr, size_t size); +void* instrumented_realloc(void* ptr, size_t size); #define WITH_MOCK_MALLOC(block, malloc_calls, ...) \ do { \ diff --git a/contrib/libcbor/test/uint_encoders_test.c b/contrib/libcbor/test/uint_encoders_test.c index 59b95a33026e..886086469819 100644 --- a/contrib/libcbor/test/uint_encoders_test.c +++ b/contrib/libcbor/test/uint_encoders_test.c @@ -10,31 +10,31 @@ unsigned char buffer[512]; -static void test_embedded_uint8(void **_CBOR_UNUSED(_state)) { +static void test_embedded_uint8(void** _state _CBOR_UNUSED) { assert_size_equal(1, cbor_encode_uint8(14, buffer, 512)); assert_memory_equal(buffer, (unsigned char[]){0x0E}, 1); } -static void test_uint8(void **_CBOR_UNUSED(_state)) { +static void test_uint8(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_uint8(180, buffer, 1)); assert_size_equal(2, cbor_encode_uint8(255, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x18, 0xFF}), 2); } -static void test_uint16(void **_CBOR_UNUSED(_state)) { +static void test_uint16(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_uint16(1000, buffer, 2)); assert_size_equal(3, 
cbor_encode_uint16(1000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x19, 0x03, 0xE8}), 3); } -static void test_uint32(void **_CBOR_UNUSED(_state)) { +static void test_uint32(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_uint32(1000000, buffer, 4)); assert_size_equal(5, cbor_encode_uint32(1000000, buffer, 512)); assert_memory_equal(buffer, ((unsigned char[]){0x1A, 0x00, 0x0F, 0x42, 0x40}), 5); } -static void test_uint64(void **_CBOR_UNUSED(_state)) { +static void test_uint64(void** _state _CBOR_UNUSED) { assert_size_equal(0, cbor_encode_uint64(18446744073709551615ULL, buffer, 8)); assert_size_equal(9, cbor_encode_uint64(18446744073709551615ULL, buffer, 512)); @@ -44,7 +44,7 @@ static void test_uint64(void **_CBOR_UNUSED(_state)) { 9); } -static void test_unspecified(void **_CBOR_UNUSED(_state)) { +static void test_unspecified(void** _state _CBOR_UNUSED) { assert_size_equal(9, cbor_encode_uint(18446744073709551615ULL, buffer, 512)); assert_memory_equal( buffer, diff --git a/contrib/libcbor/test/uint_test.c b/contrib/libcbor/test/uint_test.c index 89eb2b91833e..3c269daef238 100644 --- a/contrib/libcbor/test/uint_test.c +++ b/contrib/libcbor/test/uint_test.c @@ -10,7 +10,7 @@ #include "cbor.h" -cbor_item_t *number; +cbor_item_t* number; struct cbor_load_result res; unsigned char data1[] = {0x02, 0xFF}; @@ -20,7 +20,7 @@ unsigned char data4[] = {0x1a, 0xa5, 0xf7, 0x02, 0xb3, 0xFF}; unsigned char data5[] = {0x1b, 0xa5, 0xf7, 0x02, 0xb3, 0xa5, 0xf7, 0x02, 0xb3, 0xFF}; -static void test_very_short_int(void **_CBOR_UNUSED(_state)) { +static void test_very_short_int(void** _state _CBOR_UNUSED) { number = cbor_load(data1, 2, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_UINT); assert_true(cbor_int_get_width(number) == CBOR_INT_8); @@ -34,13 +34,13 @@ static void test_very_short_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_incomplete_data(void **_CBOR_UNUSED(_state)) { +static void 
test_incomplete_data(void** _state _CBOR_UNUSED) { number = cbor_load(data2, 1, &res); assert_null(number); assert_true(res.error.code == CBOR_ERR_NOTENOUGHDATA); } -static void test_short_int(void **_CBOR_UNUSED(_state)) { +static void test_short_int(void** _state _CBOR_UNUSED) { number = cbor_load(data2, 3, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_UINT); assert_true(cbor_int_get_width(number) == CBOR_INT_8); @@ -54,7 +54,7 @@ static void test_short_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_half_int(void **_CBOR_UNUSED(_state)) { +static void test_half_int(void** _state _CBOR_UNUSED) { number = cbor_load(data3, 5, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_UINT); assert_true(cbor_int_get_width(number) == CBOR_INT_16); @@ -68,7 +68,7 @@ static void test_half_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_int(void **_CBOR_UNUSED(_state)) { +static void test_int(void** _state _CBOR_UNUSED) { number = cbor_load(data4, 6, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_UINT); assert_true(cbor_int_get_width(number) == CBOR_INT_32); @@ -82,7 +82,7 @@ static void test_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_long_int(void **_CBOR_UNUSED(_state)) { +static void test_long_int(void** _state _CBOR_UNUSED) { number = cbor_load(data5, 10, &res); assert_true(cbor_typeof(number) == CBOR_TYPE_UINT); assert_true(cbor_int_get_width(number) == CBOR_INT_64); @@ -96,7 +96,7 @@ static void test_long_int(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_refcounting(void **_CBOR_UNUSED(_state)) { +static void test_refcounting(void** _state _CBOR_UNUSED) { number = cbor_load(data5, 10, &res); cbor_incref(number); assert_true(number->refcount == 2); @@ -106,13 +106,13 @@ static void test_refcounting(void **_CBOR_UNUSED(_state)) { assert_null(number); } -static void test_empty_input(void **_CBOR_UNUSED(_state)) { +static void test_empty_input(void** 
_state _CBOR_UNUSED) { number = cbor_load(data5, 0, &res); assert_null(number); assert_true(res.error.code == CBOR_ERR_NODATA); } -static void test_inline_creation(void **_CBOR_UNUSED(_state)) { +static void test_inline_creation(void** _state _CBOR_UNUSED) { number = cbor_build_uint8(10); assert_true(cbor_get_int(number) == 10); cbor_decref(&number); @@ -130,7 +130,7 @@ static void test_inline_creation(void **_CBOR_UNUSED(_state)) { cbor_decref(&number); } -static void test_int_creation(void **_CBOR_UNUSED(_state)) { +static void test_int_creation(void** _state _CBOR_UNUSED) { WITH_FAILING_MALLOC({ assert_null(cbor_new_int8()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_int16()); }); WITH_FAILING_MALLOC({ assert_null(cbor_new_int32()); }); diff --git a/contrib/libcbor/test/unicode_test.c b/contrib/libcbor/test/unicode_test.c index 4b3613e77f78..62245bc1fe4a 100644 --- a/contrib/libcbor/test/unicode_test.c +++ b/contrib/libcbor/test/unicode_test.c @@ -13,7 +13,7 @@ struct _cbor_unicode_status status; unsigned char missing_bytes_data[] = {0xC4, 0x8C}; /* Capital accented C */ -static void test_missing_bytes(void **_CBOR_UNUSED(_state)) { +static void test_missing_bytes(void** _state _CBOR_UNUSED) { assert_true(_cbor_unicode_codepoint_count(missing_bytes_data, 1, &status) == 0); assert_true(status.status == _CBOR_UNICODE_BADCP); @@ -28,7 +28,7 @@ static void test_missing_bytes(void **_CBOR_UNUSED(_state)) { unsigned char invalid_sequence_data[] = {0x65, 0xC4, 0x00}; /* e, invalid seq */ -static void test_invalid_sequence(void **_CBOR_UNUSED(_state)) { +static void test_invalid_sequence(void** _state _CBOR_UNUSED) { assert_true( _cbor_unicode_codepoint_count(invalid_sequence_data, 3, &status) == 0); assert_true(status.status == _CBOR_UNICODE_BADCP); diff --git a/etc/mtree/BSD.debug.dist b/etc/mtree/BSD.debug.dist index 5eabea923cbd..8c333be48201 100644 --- a/etc/mtree/BSD.debug.dist +++ b/etc/mtree/BSD.debug.dist @@ -49,6 +49,12 @@ .. krb5 plugins + kdb + .. 
+ preauth + .. + tls + .. .. .. libxo @@ -57,6 +63,8 @@ .. ossl-modules .. + virtual_oss + .. .. libexec bsdinstall diff --git a/etc/mtree/BSD.include.dist b/etc/mtree/BSD.include.dist index 487973bcc9b5..97f2194a3fa1 100644 --- a/etc/mtree/BSD.include.dist +++ b/etc/mtree/BSD.include.dist @@ -6,13 +6,13 @@ . arpa .. - atf-c + atf-c tags=package=atf-dev .. - atf-c++ + atf-c++ tags=package=atf-dev .. bsm tags=package=audit-dev .. - bsnmp + bsnmp tags=package=bsnmp-dev .. c++ v1 @@ -278,9 +278,9 @@ .. lib9p tags=package=lib9p-dev .. - libipt + libipt tags=package=libipt-dev .. - libmilter + libmilter tags=package=libmilter-dev .. libxo tags=package=runtime-dev .. diff --git a/etc/mtree/BSD.tests.dist b/etc/mtree/BSD.tests.dist index bb1e40e69ba0..884a5aba18f1 100644 --- a/etc/mtree/BSD.tests.dist +++ b/etc/mtree/BSD.tests.dist @@ -85,6 +85,8 @@ cddl lib .. + oclo + .. sbin .. usr.bin @@ -461,6 +463,8 @@ .. libutil .. + libutil++ + .. libxo .. msun diff --git a/lib/libc/amd64/string/strrchr.S b/lib/libc/amd64/string/strrchr.S index e397bbcd3478..a22a821a1d4d 100644 --- a/lib/libc/amd64/string/strrchr.S +++ b/lib/libc/amd64/string/strrchr.S @@ -1,5 +1,6 @@ /*- * Copyright (c) 2023 The FreeBSD Foundation + * Copyright (c) 2026 Robert Clausecker <fuz@FreeBSD.org> * * This software was developed by Robert Clausecker <fuz@FreeBSD.org> * under sponsorship from the FreeBSD Foundation. @@ -65,77 +66,50 @@ ARCHENTRY(strrchr, scalar) xor %rax, %rcx # str ^ c or %r10, %rax # ensure str != 0 before string or %r10, %rcx # ensure str^c != 0 before string - bswap %rcx # in reverse order, to find last match - mov %rdi, %r10 # location of initial mismatch (if any) - xor %r11, %r11 # initial mismatch (none) + xor %r11, %r11 # vector of last match (0 -> no match) add $8, %rdi # advance to next iteration lea (%rax, %r8, 1), %rdx # str - 0x01..01 not %rax # ~str and %rdx, %rax # (str - 0x01..01) & ~str - and %r9, %rax # not including junk bits - jnz 1f # end of string? 
- - lea (%rcx, %r8, 1), %rdx # (str ^ c) - 0x01..01 - not %rcx # ~(str ^ c) - and %rdx, %rcx # ((str ^ c - 0x01..01) & ~(str ^ c) - and %r9, %rcx # not including junk bits - mov %rcx, %r11 # remember mismatch in head - jmp 0f - - /* main loop unrolled twice */ - ALIGN_TEXT -3: lea (%rcx, %r8, 1), %rdx # (str ^ c) - 0x01..01 - not %rcx # ~(str ^ c) - and %rdx, %rcx # ((str ^ c - 0x01..01) & ~(str ^ c) - and %r9, %rcx # not including junk bits - lea -8(%rdi), %rdx - cmovnz %rdx, %r10 # remember location of current mismatch - cmovnz %rcx, %r11 - -0: mov (%rdi), %rax # str - mov %rsi, %rcx - xor %rax, %rcx # str ^ c - bswap %rcx # in reverse order, to find last match - lea (%rax, %r8, 1), %rdx # str - 0x01..01 - not %rax # ~str - and %rdx, %rax # (str - 0x01..01) & ~str - and %r9, %rax # not including junk bits + and %r9, %rax # NUL bytes in str, not including junk bits jnz 2f # end of string? + /* main loop */ + ALIGN_TEXT +3: mov (%rdi), %rax # str + bswap %rcx # (str ^ c) in reverse order, to find last match lea (%rcx, %r8, 1), %rdx # (str ^ c) - 0x01..01 not %rcx # ~(str ^ c) and %rdx, %rcx # ((str ^ c - 0x01..01) & ~(str ^ c) - and %r9, %rcx # not including junk bits - cmovnz %rdi, %r10 # remember location of current mismatch - cmovnz %rcx, %r11 + and %r9, %rcx # matches in str, not including junk bits + cmovnz %rdi, %r10 # if match found, update match vector + cmovnz %rcx, %r11 # ... and match pointer - mov 8(%rdi), %rax # str - add $16, %rdi + add $8, %rdi # advance to next iteration mov %rsi, %rcx xor %rax, %rcx # str ^ c - bswap %rcx lea (%rax, %r8, 1), %rdx # str - 0x01..01 not %rax # ~str and %rdx, %rax # (str - 0x01..01) & ~str - and %r9, %rax # not including junk bits + and %r9, %rax # NUL bytes in str, not including junk bits jz 3b # end of string? 
- /* NUL found */ -1: sub $8, %rdi # undo advance past buffer -2: lea (%rcx, %r8, 1), %rdx # (str ^ c) - 0x01..01 + /* NUL found, check for match in tail */ +2: mov %rax, %rdx + neg %rax + xor %rdx, %rax # all bytes behind the NUL byte + or %rax, %rcx # (str ^ c) without matches behind NUL byte + bswap %rcx # (src ^ c) in reverse order, to find last match + lea (%rcx, %r8, 1), %rdx # (str ^ c) - 0x01..01 not %rcx # ~(str ^ c) and %rdx, %rcx # ((str ^ c - 0x01..01) & ~(str ^ c) - and %r9, %rcx # not including junk bits - lea -1(%rax), %rdx - xor %rdx, %rax # mask of bytes in the string - bswap %rdx # in reverse order - and %rdx, %rcx # c found in the tail? - cmovnz %rdi, %r10 - cmovnz %rcx, %r11 - bswap %r11 # unreverse byte order - bsr %r11, %rcx # last location of c in (R10) - shr $3, %rcx # as byte offset - lea (%r10, %rcx, 1), %rax # pointer to match + and %r9, %rcx # matches in str, not including junk bits + cmovnz %rdi, %r10 # if match found, update match vector + cmovnz %rcx, %r11 # ... and match pointer + tzcnt %r11, %rcx # location of last match + lea -1(%r10), %rax # address of last character in vector + shr $3, %ecx # as byte offset + sub %rcx, %rax # subtract character offset test %r11, %r11 # was there actually a match? cmovz %r11, %rax # if not, return null pointer ret diff --git a/lib/libc/db/hash/extern.h b/lib/libc/db/hash/extern.h index d3850752ad3a..690219d30606 100644 --- a/lib/libc/db/hash/extern.h +++ b/lib/libc/db/hash/extern.h @@ -54,7 +54,7 @@ void __reclaim_buf(HTAB *, BUFHEAD *); int __split_page(HTAB *, u_int32_t, u_int32_t); /* Default hash routine. 
*/ -extern u_int32_t (*__default_hash)(const void *, size_t); +u_int32_t __default_hash(const void *, size_t); #ifdef HASH_STATISTICS extern int hash_accesses, hash_collisions, hash_expansions, hash_overflows; diff --git a/lib/libc/db/hash/hash_func.c b/lib/libc/db/hash/hash_func.c index 529180b7698d..29597a04d3c0 100644 --- a/lib/libc/db/hash/hash_func.c +++ b/lib/libc/db/hash/hash_func.c @@ -39,114 +39,9 @@ #include "page.h" #include "extern.h" -#ifdef notdef -static u_int32_t hash1(const void *, size_t) __unused; -static u_int32_t hash2(const void *, size_t) __unused; -static u_int32_t hash3(const void *, size_t) __unused; -#endif -static u_int32_t hash4(const void *, size_t); - -/* Default hash function. */ -u_int32_t (*__default_hash)(const void *, size_t) = hash4; - -#ifdef notdef -/* - * Assume that we've already split the bucket to which this key hashes, - * calculate that bucket, and check that in fact we did already split it. - * - * EJB's original hsearch hash. - */ -#define PRIME1 37 -#define PRIME2 1048583 - -u_int32_t -hash1(const void *key, size_t len) -{ - u_int32_t h; - u_int8_t *k; - - h = 0; - k = (u_int8_t *)key; - /* Convert string to integer */ - while (len--) - h = h * PRIME1 ^ (*k++ - ' '); - h %= PRIME2; - return (h); -} - -/* - * Phong Vo's linear congruential hash - */ -#define dcharhash(h, c) ((h) = 0x63c63cd9*(h) + 0x9c39c33d + (c)) - -u_int32_t -hash2(const void *key, size_t len) -{ - u_int32_t h; - u_int8_t *e, c, *k; - - k = (u_int8_t *)key; - e = k + len; - for (h = 0; k != e;) { - c = *k++; - if (!c && k > e) - break; - dcharhash(h, c); - } - return (h); -} - -/* - * This is INCREDIBLY ugly, but fast. We break the string up into 8 byte - * units. On the first time through the loop we get the "leftover bytes" - * (strlen % 8). On every other iteration, we perform 8 HASHC's so we handle - * all 8 bytes. Essentially, this saves us 7 cmp & branch instructions. If - * this routine is heavily used enough, it's worth the ugly coding. 
- * - * Ozan Yigit's original sdbm hash. - */ -u_int32_t -hash3(const void *key, size_t len) -{ - u_int32_t n, loop; - u_int8_t *k; - -#define HASHC n = *k++ + 65599 * n - - n = 0; - k = (u_int8_t *)key; - if (len > 0) { - loop = (len + 8 - 1) >> 3; - - switch (len & (8 - 1)) { - case 0: - do { /* All fall throughs */ - HASHC; - case 7: - HASHC; - case 6: - HASHC; - case 5: - HASHC; - case 4: - HASHC; - case 3: - HASHC; - case 2: - HASHC; - case 1: - HASHC; - } while (--loop); - } - - } - return (n); -} -#endif /* notdef */ - /* Chris Torek's hash function. */ u_int32_t -hash4(const void *key, size_t len) +__default_hash(const void *key, size_t len) { u_int32_t h, loop; const u_int8_t *k; diff --git a/lib/libc/gen/dup3.3 b/lib/libc/gen/dup3.3 index 338a9ae74c64..ec89ef77cf17 100644 --- a/lib/libc/gen/dup3.3 +++ b/lib/libc/gen/dup3.3 @@ -52,6 +52,8 @@ The close-on-fork flag on the new file descriptor is determined by the bit in .Fa flags . .Pp +The resolve-beneath flag on the new file descriptor is preserved. +.Pp If .Fa oldd \*(Ne diff --git a/lib/libc/gen/rtld_get_var.3 b/lib/libc/gen/rtld_get_var.3 index 092114e86d78..93aab133793b 100644 --- a/lib/libc/gen/rtld_get_var.3 +++ b/lib/libc/gen/rtld_get_var.3 @@ -73,6 +73,23 @@ but without the (or .Ev LD_32_ or any other ABI-specific) prefix. +.Pp +The list of variables that can be modified with the +.Fn rtld_set_var +function is: +.Bl -tag +.It Dv LD_BIND_NOT +.It Dv LD_BIND_NOW +.It Dv LD_DEBUG +.It Dv LD_DUMP_REL_PRE +.It Dv LD_DUMP_REL_POST +.It Dv LD_DYNAMIC_WEAK +.It Dv LD_LIBMAP_DISABLE +.It Dv LD_LIBRARY_PATH +.It Dv LD_LIBRARY_PATH_FDS +.It Dv LD_LIBRARY_PATH_RPATH +.It Dv LD_LOADFLTR +.El .Sh RETURN VALUES The .Fn rtld_get_var diff --git a/lib/libc/gen/syslog.3 b/lib/libc/gen/syslog.3 index 62140554f4f5..1e316c20d8d8 100644 --- a/lib/libc/gen/syslog.3 +++ b/lib/libc/gen/syslog.3 @@ -25,7 +25,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. 
.\" -.Dd July 3, 2023 +.Dd March 8, 2026 .Dt SYSLOG 3 .Os .Sh NAME @@ -34,7 +34,7 @@ .Nm openlog , .Nm closelog , .Nm setlogmask -.Nd control system log +.Nd control system message log .Sh LIBRARY .Lb libc .Sh SYNOPSIS diff --git a/lib/libc/tests/stdlib/cxa_thread_atexit_nothr_test.cc b/lib/libc/tests/stdlib/cxa_thread_atexit_nothr_test.cc index 0b3b9497a6bd..d70c6b1b88dc 100644 --- a/lib/libc/tests/stdlib/cxa_thread_atexit_nothr_test.cc +++ b/lib/libc/tests/stdlib/cxa_thread_atexit_nothr_test.cc @@ -30,7 +30,10 @@ #include <cstdio> #include <cstdlib> +#define AGAIN_CALL_LIMIT 20 + static FILE *output = NULL; +static int again_counter = 0; struct Foo { Foo() { ATF_REQUIRE(fprintf(output, "Created\n") > 0); } @@ -79,14 +82,16 @@ extern "C" int __cxa_thread_atexit(void (*)(void *), void *, void *); static void again(void *arg) { - - __cxa_thread_atexit(again, arg, &output); + if (again_counter < AGAIN_CALL_LIMIT) { + again_counter++; + __cxa_thread_atexit(again, arg, &output); + } } ATF_TEST_CASE_WITHOUT_HEAD(cxx__thread_inf_dtors); ATF_TEST_CASE_BODY(cxx__thread_inf_dtors) { - + skip("Skip since we only have main thread"); again(NULL); } diff --git a/lib/libc/tests/stdlib/cxa_thread_atexit_test.cc b/lib/libc/tests/stdlib/cxa_thread_atexit_test.cc index 628a70b510d1..6a5587698d37 100644 --- a/lib/libc/tests/stdlib/cxa_thread_atexit_test.cc +++ b/lib/libc/tests/stdlib/cxa_thread_atexit_test.cc @@ -30,7 +30,10 @@ #include <cstdlib> #include <thread> +#define AGAIN_CALL_LIMIT 20 + static FILE *output = NULL; +static int again_counter = 0; struct Foo { Foo() { ATF_REQUIRE(fprintf(output, "Created\n") > 0); } @@ -52,8 +55,10 @@ extern "C" int __cxa_thread_atexit(void (*)(void *), void *, void *); static void again(void *arg) { - - __cxa_thread_atexit(again, arg, &output); + if (again_counter < AGAIN_CALL_LIMIT) { + ++again_counter; + __cxa_thread_atexit(again, arg, &output); + } } struct Baz { @@ -164,6 +169,7 @@ ATF_TEST_CASE_BODY(cxx__thread_inf_dtors) std::thread 
t([]() { e.use(); }); t.join(); + ATF_REQUIRE_EQ(again_counter, AGAIN_CALL_LIMIT); } ATF_INIT_TEST_CASES(tcs) diff --git a/lib/libpmc/Makefile b/lib/libpmc/Makefile index 590f719ebff4..442efdc3d9c0 100644 --- a/lib/libpmc/Makefile +++ b/lib/libpmc/Makefile @@ -74,6 +74,7 @@ MAN+= pmc.haswell.3 MAN+= pmc.haswelluc.3 MAN+= pmc.haswellxeon.3 MAN+= pmc.iaf.3 +MAN+= pmc.ibs.3 MAN+= pmc.ivybridge.3 MAN+= pmc.ivybridgexeon.3 MAN+= pmc.sandybridge.3 diff --git a/lib/libpmc/libpmc.c b/lib/libpmc/libpmc.c index ceba40aa7b39..ebb642e8d16b 100644 --- a/lib/libpmc/libpmc.c +++ b/lib/libpmc/libpmc.c @@ -696,7 +696,7 @@ ibs_allocate_pmc(enum pmc_event pe, char *ctrspec, struct pmc_op_pmcallocate *pmc_config) { char *e, *p, *q; - uint64_t ctl; + uint64_t ctl, ldlat; pmc_config->pm_caps |= (PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_PRECISE); @@ -714,23 +714,74 @@ ibs_allocate_pmc(enum pmc_event pe, char *ctrspec, return (-1); } + /* IBS only supports sampling mode */ + if (!PMC_IS_SAMPLING_MODE(pmc_config->pm_mode)) { + return (-1); + } + /* parse parameters */ - while ((p = strsep(&ctrspec, ",")) != NULL) { - if (KWPREFIXMATCH(p, "ctl=")) { - q = strchr(p, '='); - if (*++q == '\0') /* skip '=' */ + ctl = 0; + if (pe == PMC_EV_IBS_FETCH) { + while ((p = strsep(&ctrspec, ",")) != NULL) { + if (KWMATCH(p, "l3miss")) { + ctl |= IBS_FETCH_CTL_L3MISSONLY; + } else if (KWMATCH(p, "randomize")) { + ctl |= IBS_FETCH_CTL_RANDOMIZE; + } else { return (-1); + } + } - ctl = strtoull(q, &e, 0); - if (e == q || *e != '\0') + if (pmc_config->pm_count < IBS_FETCH_MIN_RATE || + pmc_config->pm_count > IBS_FETCH_MAX_RATE) + return (-1); + + ctl |= IBS_FETCH_INTERVAL_TO_CTL(pmc_config->pm_count); + } else { + while ((p = strsep(&ctrspec, ",")) != NULL) { + if (KWMATCH(p, "l3miss")) { + ctl |= IBS_OP_CTL_L3MISSONLY; + } else if (KWPREFIXMATCH(p, "ldlat=")) { + q = strchr(p, '='); + if (*++q == '\0') /* skip '=' */ + return (-1); + + ldlat = strtoull(q, &e, 0); + if (e == q || *e != '\0') + return (-1); + 
+ /* + * IBS load latency filtering requires the + * latency to be a multiple of 128 and between + * 128 and 2048. The latency is stored in the + * IbsOpLatThrsh field, which only contains + * four bits so the processor computes + * (IbsOpLatThrsh+1)*128 as the value. + * + * AMD PPR Vol 1 for AMD Family 1Ah Model 02h + * C1 (57238) 2026-03-06 Revision 0.49. + */ + if (ldlat < 128 || ldlat > 2048) + return (-1); + ctl |= IBS_OP_CTL_LDLAT_TO_CTL(ldlat); + ctl |= IBS_OP_CTL_L3MISSONLY | IBS_OP_CTL_LATFLTEN; + } else if (KWMATCH(p, "randomize")) { + ctl |= IBS_OP_CTL_COUNTERCONTROL; + } else { return (-1); + } + } - pmc_config->pm_md.pm_ibs.ibs_ctl |= ctl; - } else { + if (pmc_config->pm_count < IBS_OP_MIN_RATE || + pmc_config->pm_count > IBS_OP_MAX_RATE) return (-1); - } + + ctl |= IBS_OP_INTERVAL_TO_CTL(pmc_config->pm_count); } + + pmc_config->pm_md.pm_ibs.ibs_ctl |= ctl; + return (0); } diff --git a/lib/libpmc/pmc.3 b/lib/libpmc/pmc.3 index 9a5b599759ff..cb28e0b786b9 100644 --- a/lib/libpmc/pmc.3 +++ b/lib/libpmc/pmc.3 @@ -224,6 +224,11 @@ performance measurement architecture version 2 and later. Programmable hardware counters present in CPUs conforming to the .Tn Intel performance measurement architecture version 1 and later. +.It Li PMC_CLASS_IBS +.Tn AMD +Instruction Based Sampling (IBS) counters present in +.Tn AMD +Family 10h and above. .It Li PMC_CLASS_K8 Programmable hardware counters present in .Tn "AMD Athlon64" @@ -491,6 +496,7 @@ following manual pages: .It Em "PMC Class" Ta Em "Manual Page" .It Li PMC_CLASS_IAF Ta Xr pmc.iaf 3 .It Li PMC_CLASS_IAP Ta Xr pmc.atom 3 , Xr pmc.core 3 , Xr pmc.core2 3 +.It Li PMC_CLASS_IBS Ta Xr pmc.ibs 3 .It Li PMC_CLASS_K8 Ta Xr pmc.amd 3 .It Li PMC_CLASS_TSC Ta Xr pmc.tsc 3 .El @@ -542,6 +548,7 @@ Doing otherwise is unsupported. 
.Xr pmc.haswelluc 3 , .Xr pmc.haswellxeon 3 , .Xr pmc.iaf 3 , +.Xr pmc.ibs 3 , .Xr pmc.ivybridge 3 , .Xr pmc.ivybridgexeon 3 , .Xr pmc.sandybridge 3 , diff --git a/lib/libpmc/pmc.amd.3 b/lib/libpmc/pmc.amd.3 index 047b31aa78bb..75c6331b000f 100644 --- a/lib/libpmc/pmc.amd.3 +++ b/lib/libpmc/pmc.amd.3 @@ -777,6 +777,7 @@ and the underlying hardware events used. .Xr pmc.core 3 , .Xr pmc.core2 3 , .Xr pmc.iaf 3 , +.Xr pmc.ibs 3 , .Xr pmc.soft 3 , .Xr pmc.tsc 3 , .Xr pmclog 3 , diff --git a/lib/libpmc/pmc.core.3 b/lib/libpmc/pmc.core.3 index b4fa9ab661a4..4c41e7c7ad3b 100644 --- a/lib/libpmc/pmc.core.3 +++ b/lib/libpmc/pmc.core.3 @@ -786,6 +786,7 @@ may not count some transitions. .Xr pmc.atom 3 , .Xr pmc.core2 3 , .Xr pmc.iaf 3 , +.Xr pmc.ibs 3 , .Xr pmc.soft 3 , .Xr pmc.tsc 3 , .Xr pmclog 3 , diff --git a/lib/libpmc/pmc.core2.3 b/lib/libpmc/pmc.core2.3 index 86604b7ff16c..7e544fad43b6 100644 --- a/lib/libpmc/pmc.core2.3 +++ b/lib/libpmc/pmc.core2.3 @@ -1101,6 +1101,7 @@ and the underlying hardware events used. .Xr pmc.atom 3 , .Xr pmc.core 3 , .Xr pmc.iaf 3 , +.Xr pmc.ibs 3 , .Xr pmc.soft 3 , .Xr pmc.tsc 3 , .Xr pmc_cpuinfo 3 , diff --git a/lib/libpmc/pmc.iaf.3 b/lib/libpmc/pmc.iaf.3 index eaf45db140f5..c3528e472103 100644 --- a/lib/libpmc/pmc.iaf.3 +++ b/lib/libpmc/pmc.iaf.3 @@ -125,6 +125,7 @@ CPU, use the event specifier .Xr pmc.atom 3 , .Xr pmc.core 3 , .Xr pmc.core2 3 , +.Xr pmc.ibs 3 , .Xr pmc.soft 3 , .Xr pmc.tsc 3 , .Xr pmc_cpuinfo 3 , diff --git a/lib/libpmc/pmc.ibs.3 b/lib/libpmc/pmc.ibs.3 new file mode 100644 index 000000000000..f0534a0955d2 --- /dev/null +++ b/lib/libpmc/pmc.ibs.3 @@ -0,0 +1,153 @@ +.\" +.\" SPDX-License-Identifier: BSD-2-Clause +.\" +.\" Copyright (c) 2026, Netflix, Inc. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. 
Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.Dd March 15, 2026 +.Dt PMC.IBS 3 +.Os +.Sh NAME +.Nm pmc.ibs +.Nd Instruction Based Sampling for +.Tn AMD +CPUs +.Sh LIBRARY +.Lb libpmc +.Sh SYNOPSIS +.In pmc.h +.Sh DESCRIPTION +AMD Instruction Based Sampling (IBS) was introduced with the K10 family of +CPUs. +AMD IBS is an alternative approach that samples instructions or micro-ops and +provides a per-instruction or micro-op breakdown of the sources of stalls. +.Pp +Unlike traditional counters, IBS can only be used in the sampling mode and +provides extra data embedded in the callchain. +IBS events set the PMC_F_MULTIPART flag to signify multiple payload types are +contained in the callchain. +The first 8 bytes of the callchain contain four tuples with a one byte type and +a one byte length field. +The regular PMC callchain can be found following the multipart payload. 
+.Pp +IBS only provides two events that analyze instruction fetches and instruction +execution. +The instruction fetch (ibs-fetch) event provides data on the processor +front-end including reporting instruction cache and TLB events. +The instruction execution (ibs-op) event provides data on the processor +execution including reporting mispredictions, data cache and TLB events. +You should use the AMD PMC counters documented in +.Xr pmc.amd 3 +to analyze stalls relating instruction issue including reservation contention. +.Pp +A guide to analyzing IBS data is provided in Appendix G of the +.Rs +.%B "Software Optimization Guide for AMD Family 10h and 12h Processors" +.%N "Publication No. 40546" +.%D "February 2011" +.%Q "Advanced Micro Devices, Inc." +.Re +A more recent document should be used for decoding all of the flags and fields +in the IBS data. +For example, see the AMD Zen 5 documentation +.Rs +.%B "Processor Programming Reference (PPR) for AMD Family 1Ah Model 02h" +.%N "Publication No. 57238" +.%D "March 6, 2026" +.%Q "Advanced Micro Devices, Inc." +.Re +.Ss PMC Features +AMD IBS supports the following capabilities. +.Bl -column "PMC_CAP_INTERRUPT" "Support" +.It Em Capability Ta Em Support +.It PMC_CAP_CASCADE Ta \&No +.It PMC_CAP_EDGE Ta Yes +.It PMC_CAP_INTERRUPT Ta Yes +.It PMC_CAP_INVERT Ta \&No +.It PMC_CAP_READ Ta \&No +.It PMC_CAP_PRECISE Ta Yes +.It PMC_CAP_SYSTEM Ta Yes +.It PMC_CAP_TAGGING Ta \&No +.It PMC_CAP_THRESHOLD Ta \&No +.It PMC_CAP_USER Ta \&No +.It PMC_CAP_WRITE Ta \&No +.El +.Pp +By default AMD IBS enables the edge, interrupt, system and precise flags. +.Ss Event Qualifiers +Event specifiers for AMD IBS can have the following optional +qualifiers: +.Bl -tag -width "ldlat=value" +.It Li l3miss +Configure IBS to only sample if an l3miss occurred. +.It Li ldlat= Ns Ar value +Configure the counter to only sample events with load latencies above +.Ar ldlat . 
+IBS only supports filtering latencies that are a multiple of 128 and between +128 and 2048. +Load latency filtering can only be used with ibs-op events and imply the +l3miss qualifier. +.It Li randomize +Randomize the sampling rate. +.El +.Ss AMD IBS Events Specifiers +The IBS event class provides only two event specifiers: +.Bl -tag -width indent +.It Li ibs-fetch Xo +.Op ,l3miss +.Op ,randomize +.Xc +Collect performance samples during instruction fetch. +The +.Ar randomize +qualifier randomly sets the bottom four bits of the sample rate. +.It Li ibs-op Xo +.Op ,l3miss +.Op ,ldlat= Ns Ar ldlat +.Op ,randomize +.Xc +Collect performance samples during instruction execution. +The +.Ar randomize +qualifier, upon reaching the maximum count, restarts the count with a value +between 1 and 127. +.El +.Pp +You may collect both events at the same time. +N.B. AMD discouraged doing so with certain older processors, stating that +sampling both simultaneously perturbs the results. +Please see the processor programming reference for your specific processor. +.Sh SEE ALSO +.Xr pmc 3 , +.Xr pmc.amd 3 , +.Xr pmc.soft 3 , +.Xr pmc.tsc 3 , +.Xr pmclog 3 , +.Xr hwpmc 4 +.Sh HISTORY +AMD IBS support was first introduced in +.Fx 16.0 . +.Sh AUTHORS +AMD IBS support and this manual page were written +.An Ali Mashtizadeh Aq Mt ali@mashtizadeh.com +and sponsored by Netflix, Inc. diff --git a/lib/libpmc/pmc.soft.3 b/lib/libpmc/pmc.soft.3 index 08d5af63d02d..f58b3e8ffa26 100644 --- a/lib/libpmc/pmc.soft.3 +++ b/lib/libpmc/pmc.soft.3 @@ -90,6 +90,7 @@ Write page fault. .Xr pmc.corei7 3 , .Xr pmc.corei7uc 3 , .Xr pmc.iaf 3 , +.Xr pmc.ibs 3 , .Xr pmc.tsc 3 , .Xr pmc.ucf 3 , .Xr pmc.westmereuc 3 , diff --git a/lib/libpmc/pmc.tsc.3 b/lib/libpmc/pmc.tsc.3 index 4834d897f90c..73e2377df0c7 100644 --- a/lib/libpmc/pmc.tsc.3 +++ b/lib/libpmc/pmc.tsc.3 @@ -62,6 +62,7 @@ maps to the TSC. 
.Xr pmc.core 3 , .Xr pmc.core2 3 , .Xr pmc.iaf 3 , +.Xr pmc.ibs 3 , .Xr pmc.soft 3 , .Xr pmclog 3 , .Xr hwpmc 4 diff --git a/lib/libpmc/pmc.ucf.3 b/lib/libpmc/pmc.ucf.3 index a7cea6bb57f9..37ee0f87a951 100644 --- a/lib/libpmc/pmc.ucf.3 +++ b/lib/libpmc/pmc.ucf.3 @@ -88,6 +88,7 @@ offset C0H under device number 0 and Function 0. .Xr pmc.corei7 3 , .Xr pmc.corei7uc 3 , .Xr pmc.iaf 3 , +.Xr pmc.ibs 3 , .Xr pmc.soft 3 , .Xr pmc.tsc 3 , .Xr pmc.westmere 3 , diff --git a/lib/libpmc/pmu-events/arch/x86/mapfile.csv b/lib/libpmc/pmu-events/arch/x86/mapfile.csv index f2211e1e1753..40e859b25f19 100644 --- a/lib/libpmc/pmu-events/arch/x86/mapfile.csv +++ b/lib/libpmc/pmu-events/arch/x86/mapfile.csv @@ -52,6 +52,9 @@ GenuineIntel-6-BA,v1,alderlake,core GenuineIntel-6-BF,v1,alderlake,core GenuineIntel-6-8F,v1,sapphirerapids,core GenuineIntel-6-BE,v1,alderlaken,core +GenuineIntel-6-AA,v1,meteorlake,core +GenuineIntel-6-AC,v1,meteorlake,core +GenuineIntel-6-B5,v1,meteorlake,core AuthenticAMD-23-[012][0-9A-F],v2,amdzen1,core AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core AuthenticAMD-25-[0245][[:xdigit:]],v1,amdzen3,core diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/cache.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/cache.json new file mode 100644 index 000000000000..6419bc36f249 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/cache.json @@ -0,0 +1,1733 @@ +[ + { + "BriefDescription": "Counts the number of request that were not accepted into the L2Q because the L2Q is FULL.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x31", + "EventName": "CORE_REJECT_L2Q.ANY", + "PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2Q due to a full or nearly full w condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. 
The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a cores dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event.) Counts on a per core basis.", + "SampleAfterValue": "200003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x51", + "EventName": "DL1.DIRTY_EVICTION", + "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.", + "SampleAfterValue": "200003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "L1D.HWPF_MISS", + "Counter": "0,1,2,3", + "EventCode": "0x51", + "EventName": "L1D.HWPF_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.", + "Counter": "0,1,2,3", + "EventCode": "0x51", + "EventName": "L1D.REPLACEMENT", + "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.", + "Counter": "0,1,2,3", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.FB_FULL", + "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. 
Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS", + "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.", + "Counter": "0,1,2,3", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.L2_STALLS", + "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of L1D misses that are outstanding", + "Counter": "0,1,2,3", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.PENDING", + "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. 
Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles with L1D load Misses outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x48", + "EventName": "L1D_PEND_MISS.PENDING_CYCLES", + "PublicDescription": "Counts duration of L1D miss outstanding in cycles.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache lines filling L2", + "Counter": "0,1,2,3", + "EventCode": "0x25", + "EventName": "L2_LINES_IN.ALL", + "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.", + "SampleAfterValue": "100003", + "UMask": "0x1f", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x25", + "EventName": "L2_LINES_IN.E", + "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state. Counts on a per core basis.", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x25", + "EventName": "L2_LINES_IN.F", + "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state. 
Counts on a per core basis.", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x25", + "EventName": "L2_LINES_IN.M", + "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state. Counts on a per core basis.", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x25", + "EventName": "L2_LINES_IN.S", + "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state. Counts on a per core basis.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.NON_SILENT", + "PublicDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill. Increments on the core that brought the line in originally.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.", + "Counter": "0,1,2,3", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.NON_SILENT", + "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. 
Modified lines are written back to L3", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.SILENT", + "PublicDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill. Increments on the core that brought the line in originally.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.SILENT", + "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache. These lines are typically in Shared or Exclusive state. A non-threaded event.", + "SampleAfterValue": "200003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses", + "Counter": "0,1,2,3", + "EventCode": "0x26", + "EventName": "L2_LINES_OUT.USELESS_HWPF", + "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache", + "SampleAfterValue": "200003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x30", + "EventName": "L2_REJECT_XQ.ANY", + "PublicDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition which likely indicates back pressure from the IDI link. 
The XQ may reject transactions from the L2Q (non-cacheable requests), BBL (L2 misses) and WOB (L2 write-back victims).", + "SampleAfterValue": "200003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_REQUEST.ALL", + "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]", + "SampleAfterValue": "200003", + "UMask": "0xff", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of L2 Cache Accesses that resulted in a Hit from a front door request only (does not include rejects or recycles), per core event", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x24", + "EventName": "L2_REQUEST.HIT", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_REQUEST.HIT", + "PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]", + "SampleAfterValue": "200003", + "UMask": "0xdf", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of total L2 Cache Accesses that resulted in a Miss from a front door request only (does not include rejects or recycles), per core event", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x24", + "EventName": "L2_REQUEST.MISS", + "SampleAfterValue": "200003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_REQUEST.MISS", + "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. 
True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]", + "SampleAfterValue": "200003", + "UMask": "0x3f", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of L2 Cache Accesses that miss the L2 and get BBL reject short and long rejects, per core event", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x24", + "EventName": "L2_REQUEST.REJECTS", + "SampleAfterValue": "200003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "L2 code requests", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_CODE_RD", + "PublicDescription": "Counts the total number of L2 code requests.", + "SampleAfterValue": "200003", + "UMask": "0xe4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand Data Read access L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD", + "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. 
An access is counted once.", + "SampleAfterValue": "200003", + "UMask": "0xe1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand requests that miss L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_DEMAND_MISS", + "PublicDescription": "Counts demand requests that miss L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0x27", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand requests to L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES", + "PublicDescription": "Counts demand requests to L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0xe7", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2_RQSTS.ALL_HWPF", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_HWPF", + "SampleAfterValue": "200003", + "UMask": "0xf0", + "Unit": "cpu_core" + }, + { + "BriefDescription": "RFO requests to L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.ALL_RFO", + "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. 
L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.", + "SampleAfterValue": "200003", + "UMask": "0xe2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache hits when fetching instructions, code reads.", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.CODE_RD_HIT", + "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.", + "SampleAfterValue": "200003", + "UMask": "0xc4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache misses when fetching instructions", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.CODE_RD_MISS", + "PublicDescription": "Counts L2 cache misses when fetching instructions.", + "SampleAfterValue": "200003", + "UMask": "0x24", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand Data Read requests that hit L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", + "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0xc1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand Data Read miss L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS", + "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.", + "SampleAfterValue": "200003", + "UMask": "0x21", + "Unit": "cpu_core" + }, + { + "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.HIT", + "PublicDescription": "Counts all requests that hit L2 cache. 
[This event is alias to L2_REQUEST.HIT]", + "SampleAfterValue": "200003", + "UMask": "0xdf", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2_RQSTS.HWPF_MISS", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.HWPF_MISS", + "SampleAfterValue": "200003", + "UMask": "0x30", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_REQUEST.MISS]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.MISS", + "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]", + "SampleAfterValue": "200003", + "UMask": "0x3f", + "Unit": "cpu_core" + }, + { + "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.REFERENCES", + "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. 
[This event is alias to L2_REQUEST.ALL]", + "SampleAfterValue": "200003", + "UMask": "0xff", + "Unit": "cpu_core" + }, + { + "BriefDescription": "RFO requests that hit L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.RFO_HIT", + "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0xc2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "RFO requests that miss L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.RFO_MISS", + "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0x22", + "Unit": "cpu_core" + }, + { + "BriefDescription": "SW prefetch requests that hit L2 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.SWPF_HIT", + "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.", + "SampleAfterValue": "200003", + "UMask": "0xc8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "SW prefetch requests that miss L2 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x24", + "EventName": "L2_RQSTS.SWPF_MISS", + "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. 
Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.", + "SampleAfterValue": "200003", + "UMask": "0x28", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 writebacks that access L2 cache", + "Counter": "0,1,2,3", + "EventCode": "0x23", + "EventName": "L2_TRANS.L2_WB", + "PublicDescription": "Counts L2 writebacks that access L2 cache.", + "SampleAfterValue": "200003", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when L1D is locked", + "Counter": "0,1,2,3", + "EventCode": "0x42", + "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION", + "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x2e", + "EventName": "LONGEST_LAT_CACHE.MISS", + "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.", + "SampleAfterValue": "200003", + "UMask": "0x41", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x2e", + "EventName": "LONGEST_LAT_CACHE.MISS", + "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. 
It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.", + "SampleAfterValue": "100003", + "UMask": "0x41", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x2e", + "EventName": "LONGEST_LAT_CACHE.REFERENCE", + "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.", + "SampleAfterValue": "200003", + "UMask": "0x4f", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x2e", + "EventName": "LONGEST_LAT_CACHE.REFERENCE", + "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. 
It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.", + "SampleAfterValue": "100003", + "UMask": "0x4f", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.ALL", + "SampleAfterValue": "1000003", + "UMask": "0x7f", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.L2_HIT", + "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which missed in the L2 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.L2_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x7e", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which hit in the LLC. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT", + "SampleAfterValue": "1000003", + "UMask": "0x6", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which hit in the LLC, no snoop was required. LLC provides the data. 
If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT_NOSNOOP", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which missed all the caches. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x78", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which missed all the caches. DRAM, MMIO or other LOCAL memory type provides the data. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x35", + "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS_LOCALMEM", + "SampleAfterValue": "1000003", + "UMask": "0x50", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.ALL", + "SampleAfterValue": "1000003", + "UMask": "0x7f", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT", + "PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + 
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which missed in the L2 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.L2_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x7e", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT", + "SampleAfterValue": "1000003", + "UMask": "0x6", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, no snoop was required. LLC provides the data. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_NOSNOOP", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, a snoop was required, the snoop misses or the snoop hits but NO_FWD. LLC provides the data. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_SNOOP", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches. 
If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x78", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled to a demand load miss and the data was provided from an unknown source. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS_LOCALMEM", + "SampleAfterValue": "1000003", + "UMask": "0x50", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted cycles when the core is stalled to a store buffer full condition", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x34", + "EventName": "MEM_BOUND_STALLS_LOAD.SBFULL", + "SampleAfterValue": "1000003", + "UMask": "0x80", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Retired load instructions.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.ALL_LOADS", + "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x81", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired store instructions.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.ALL_STORES", + "PublicDescription": "Counts all retired store instructions. 
Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x82", + "Unit": "cpu_core" + }, + { + "BriefDescription": "All retired memory instructions.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.ANY", + "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x83", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions with locked access.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.LOCK_LOADS", + "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x21", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions that split across a cacheline boundary.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.SPLIT_LOADS", + "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x41", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired store instructions that split across a cacheline boundary.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.SPLIT_STORES", + "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x42", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions that hit the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS", + "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB). 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x9", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired store instructions that hit the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_HIT_STORES", + "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0xa", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions that miss the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS", + "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x11", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired store instructions that miss the STLB.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES", + "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x12", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Completed demand load uops that miss the L1 d-cache.", + "Counter": "0,1,2,3", + "EventCode": "0x43", + "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY", + "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)", + "SampleAfterValue": "1000003", + "UMask": "0xfd", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", + "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0", + "SampleAfterValue": "20011", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", + "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0", + "SampleAfterValue": "20011", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE", + "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd2", + "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD", + "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0", + "SampleAfterValue": "20011", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd3", + "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", + "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd4", + "EventName": "MEM_LOAD_MISC_RETIRED.UC", + "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.FB_HIT", + "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions with L1 cache hits as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L1_HIT", + "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions missed L1 cache as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L1_MISS", + "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0", + "SampleAfterValue": "200003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions with L2 cache hits as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L2_HIT", + "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. 
Available PDIST counters: 0", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions missed L2 cache as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L2_MISS", + "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0", + "SampleAfterValue": "100021", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions with L3 cache hits as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L3_HIT", + "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0", + "SampleAfterValue": "100021", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired load instructions missed L3 cache as data sources", + "Counter": "0,1,2,3", + "Data_LA": "1", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_RETIRED.L3_MISS", + "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. 
Available PDIST counters: 0", + "SampleAfterValue": "50021", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and modified data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd4", + "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.L3_HIT_SNOOP_HITM", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and no data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd4", + "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.L3_HIT_SNOOP_NO_FWD", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and non-modified data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd4", + "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.L3_HIT_SNOOP_WITH_FWD", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd4", + "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LOCAL_DRAM", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", + "SampleAfterValue": "200003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", + "SampleAfterValue": "200003", + "UMask": "0x40", + "Unit": 
"cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS", + "SampleAfterValue": "200003", + "UMask": "0x80", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT", + "SampleAfterValue": "200003", + "UMask": "0x1c", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xd1", + "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT", + "SampleAfterValue": "200003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.ALL", + "SampleAfterValue": "20003", + "UMask": "0x7", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.LD_BUF", + "SampleAfterValue": "20003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.RSV", + 
"SampleAfterValue": "20003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x04", + "EventName": "MEM_SCHEDULER_BLOCK.ST_BUF", + "SampleAfterValue": "20003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "MEM_STORE_RETIRED.L2_HIT", + "Counter": "0,1,2,3", + "EventCode": "0x44", + "EventName": "MEM_STORE_RETIRED.L2_HIT", + "SampleAfterValue": "200003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.ALL", + "SampleAfterValue": "200003", + "UMask": "0x83", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load ops retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.ALL_LOADS", + "SampleAfterValue": "200003", + "UMask": "0x81", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of store ops retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.ALL_STORES", + "SampleAfterValue": "200003", + "UMask": "0x82", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 1024. Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024", + "MSRIndex": "0x3F6", + "MSRValue": "0x400", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128. 
Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128", + "MSRIndex": "0x3F6", + "MSRValue": "0x80", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16. Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16", + "MSRIndex": "0x3F6", + "MSRValue": "0x10", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 2048. Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048", + "MSRIndex": "0x3F6", + "MSRValue": "0x800", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256. Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256", + "MSRIndex": "0x3F6", + "MSRValue": "0x100", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32. Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32", + "MSRIndex": "0x3F6", + "MSRValue": "0x20", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4. 
Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4", + "MSRIndex": "0x3F6", + "MSRValue": "0x4", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512. Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512", + "MSRIndex": "0x3F6", + "MSRValue": "0x200", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64. Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64", + "MSRIndex": "0x3F6", + "MSRValue": "0x40", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8. 
Only counts with PEBS enabled.", + "Counter": "0,1", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8", + "MSRIndex": "0x3F6", + "MSRValue": "0x8", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load uops retired that performed one or more locks", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", + "SampleAfterValue": "200003", + "UMask": "0x21", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of memory uops retired that were splits.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT", + "SampleAfterValue": "200003", + "UMask": "0x43", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retired split load uops.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", + "SampleAfterValue": "200003", + "UMask": "0x41", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retired split store uops.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", + "SampleAfterValue": "200003", + "UMask": "0x42", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.STLB_MISS", + "SampleAfterValue": "200003", + "UMask": "0x13", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS", + "SampleAfterValue": "200003", + "UMask": "0x11", + "Unit": "cpu_atom" + }, + { 
+ "BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES", + "SampleAfterValue": "200003", + "UMask": "0x12", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of stores uops retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xd0", + "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY", + "SampleAfterValue": "1000003", + "UMask": "0x6", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Retired memory uops for any access", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe5", + "EventName": "MEM_UOP_RETIRED.ANY", + "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses", + "SampleAfterValue": "1000003", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.COREWB_M.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C0008", + "PublicDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.COREWB_NONM.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C1000", + "PublicDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_CODE_RD.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C0004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x4003C0004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x8003C0004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that have any type of response.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10001", + "PublicDescription": "Counts demand data reads that have any type of response. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10001", + "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by the L3 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C0001", + "PublicDescription": "Counts demand data reads that were supplied by the L3 cache. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0001", + "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0001", + "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x4003C0001", + "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x8003C0001", + "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x8003C0001", + "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10002", + "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10002", + "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_RFO.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C0002", + "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0002", + "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10003C0002", + "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.READS_TO_CORE.L3_HIT", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3F803C4477", + "PublicDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Any memory transaction that reached the SQ.", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS", + "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..", + "SampleAfterValue": "100003", + "UMask": "0x80", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand and prefetch data reads", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DATA_RD", + "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.", + "SampleAfterValue": "100003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cacheable and Non-Cacheable code read requests", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD", + "PublicDescription": "Counts both cacheable and Non-Cacheable code read requests.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand Data Read requests sent to uncore", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD", + "PublicDescription": "Counts the Demand Data Read requests sent to uncore. 
Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.DEMAND_RFO", + "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD", + "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). 
See the corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", + "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD", + "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). 
See the corresponding Umask under OFFCORE_REQUESTS.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", + "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO", + "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.", + "Counter": "0,1,2,3", + "EventCode": "0x2c", + "EventName": "SQ_MISC.BUS_LOCK", + "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. 
Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.ANY", + "SampleAfterValue": "100003", + "UMask": "0xf", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of PREFETCHNTA instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.NTA", + "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of PREFETCHW instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.PREFETCHW", + "PublicDescription": "Counts the number of PREFETCHW instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of PREFETCHT0 instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.T0", + "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.", + "Counter": "0,1,2,3", + "EventCode": "0x40", + "EventName": "SW_PREFETCH_ACCESS.T1_T2", + "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss", + "Counter": "0,1,2,3,4,5,6,7", + 
"EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ICACHE", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_atom" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/floating-point.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/floating-point.json new file mode 100644 index 000000000000..1ccbd54904c5 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/floating-point.json @@ -0,0 +1,368 @@ +[ + { + "BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xcd", + "EventName": "ARITH.FPDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event counts the cycles the floating point divider is busy.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb0", + "EventName": "ARITH.FPDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of active floating point dividers per cycle in the loop stage.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcd", + "EventName": "ARITH.FPDIV_OCCUPANCY", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of floating point divider uops executed per cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcd", + "EventName": "ARITH.FPDIV_UOPS", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts all microcode FP assists.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.FP", + "PublicDescription": "Counts all microcode Floating Point assists.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "ASSISTS.SSE_AVX_MIX", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": 
"ASSISTS.SSE_AVX_MIX", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.PORT_0", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.PORT_1", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.PORT_5", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V0", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V1", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V2", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. 
Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", + "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", + "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. 
Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", + "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. 
Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", + "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS", + "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. 
Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x18", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.SCALAR", + "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "1000003", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. 
FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", + "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE", + "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. 
The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of any Vector retired FP arithmetic instructions", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_ARITH_INST_RETIRED.VECTOR", + "PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.", + "SampleAfterValue": "1000003", + "UMask": "0xfc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.ALL", + "SampleAfterValue": "1000003", + "UMask": "0x3", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP64]", + "Counter": "0,1,2,3,4,5,6,7", + "Deprecated": "1", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.DP", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results [This event is alias to FP_FLOPS_RETIRED.SP]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.FP32", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results [This event is alias to FP_FLOPS_RETIRED.DP]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.FP64", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event is deprecated. 
[This event is alias to FP_FLOPS_RETIRED.FP32]", + "Counter": "0,1,2,3,4,5,6,7", + "Deprecated": "1", + "EventCode": "0xc8", + "EventName": "FP_FLOPS_RETIRED.SP", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the total number of floating point retired instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_INST_RETIRED.128B_DP", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retired instructions whose sources are a packed 128 bit single precision floating point. This may be SSE or AVX.128 operations.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_INST_RETIRED.128B_SP", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retired instructions whose sources are a packed 256 bit double precision floating point.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_INST_RETIRED.256B_DP", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 32bit single precision floating point.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_INST_RETIRED.32B_SP", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 64 bit double precision floating point.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc7", + "EventName": "FP_INST_RETIRED.64B_DP", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on all floating point ports.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "FP_VINT_UOPS_EXECUTED.ALL", + 
"SampleAfterValue": "1000003", + "UMask": "0xf", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 0.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "FP_VINT_UOPS_EXECUTED.P0", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 1.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "FP_VINT_UOPS_EXECUTED.P1", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 2.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "FP_VINT_UOPS_EXECUTED.P2", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 0, 1, 2.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "FP_VINT_UOPS_EXECUTED.PRIMARY", + "SampleAfterValue": "1000003", + "UMask": "0xe", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on floating point and vector integer store data port.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "FP_VINT_UOPS_EXECUTED.STD", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.FP_ASSIST", + "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.", + "SampleAfterValue": "20003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + 
{ + "BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.FPDIV", + "SampleAfterValue": "2000003", + "UMask": "0x8", + "Unit": "cpu_atom" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/frontend.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/frontend.json new file mode 100644 index 000000000000..dcf8c8e720f3 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/frontend.json @@ -0,0 +1,623 @@ +[ + { + "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe6", + "EventName": "BACLEARS.ANY", + "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.", + "SampleAfterValue": "200003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Clears due to Unknown Branches.", + "Counter": "0,1,2,3", + "EventCode": "0x60", + "EventName": "BACLEARS.ANY", + "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. 
This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Stalls caused by changing prefix length of the instruction.", + "Counter": "0,1,2,3", + "EventCode": "0x87", + "EventName": "DECODE.LCP", + "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.", + "SampleAfterValue": "500009", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles the Microcode Sequencer is busy.", + "Counter": "0,1,2,3", + "EventCode": "0x87", + "EventName": "DECODE.MS_BUSY", + "SampleAfterValue": "500009", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "DSB-to-MITE switch true penalty cycles.", + "Counter": "0,1,2,3", + "EventCode": "0x61", + "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES", + "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). 
This event counts fetch penalty cycles when a transition occurs from DSB to MITE.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of instructions retired that were tagged with having preceded with frontend bound behavior", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ALL", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Retired ANT branches", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ANY_ANT", + "MSRIndex": "0x3F7", + "MSRValue": "0x9", + "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted) Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired Instructions who experienced DSB miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x1", + "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a baclear", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.BRANCH_DETECT", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles /empty issue slots due to a btclear", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.BRANCH_RESTEER", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.CISC", + "PublicDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of instructions retired that were tagged every cycle the decoder is unable to send 3 uops per cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.DECODE", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Retired Instructions who experienced a critical DSB miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.DSB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x11", + "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. 
the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to icache miss", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ICACHE", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ITLB_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Retired Instructions who experienced iTLB true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.ITLB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x14", + "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.L1I_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x12", + "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.L2_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x13", + "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_1", + "MSRIndex": "0x3F7", + "MSRValue": "0x600106", + "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_128", + "MSRIndex": "0x3F7", + "MSRValue": "0x608006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_16", + "MSRIndex": "0x3F7", + "MSRValue": "0x601006", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2", + "MSRIndex": "0x3F7", + "MSRValue": "0x600206", + "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_256", + "MSRIndex": "0x3F7", + "MSRValue": "0x610006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1", + "MSRIndex": "0x3F7", + "MSRValue": "0x100206", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_32", + "MSRIndex": "0x3F7", + "MSRValue": "0x602006", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_4", + "MSRIndex": "0x3F7", + "MSRValue": "0x600406", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_512", + "MSRIndex": "0x3F7", + "MSRValue": "0x620006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_64", + "MSRIndex": "0x3F7", + "MSRValue": "0x604006", + "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_8", + "MSRIndex": "0x3F7", + "MSRValue": "0x600806", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted Retired ANT branches", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.MISP_ANT", + "MSRIndex": "0x3F7", + "MSRValue": "0x9", + "PublicDescription": "ANT retired branches that got just mispredicted Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.MS_FLOWS", + "MSRIndex": "0x3F7", + "MSRValue": "0x8", + "PublicDescription": "FRONTEND_RETIRED.MS_FLOWS Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of instruction retired tagged after a wasted issue slot if none of the previous events occurred", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.OTHER", + "SampleAfterValue": "1000003", + "UMask": "0x80", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a predecode wrong.", + "Counter": 
"0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.PREDECODE", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.STLB_MISS", + "MSRIndex": "0x3F7", + "MSRValue": "0x15", + "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc6", + "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH", + "MSRIndex": "0x3F7", + "MSRValue": "0x17", + "PublicDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x80", + "EventName": "ICACHE.ACCESSES", + "SampleAfterValue": "200003", + "UMask": "0x3", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. 
-", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x80", + "EventName": "ICACHE.MISSES", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.", + "Counter": "0,1,2,3", + "EventCode": "0x80", + "EventName": "ICACHE_DATA.STALLS", + "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.", + "SampleAfterValue": "500009", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "ICACHE_DATA.STALL_PERIODS", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0x80", + "EventName": "ICACHE_DATA.STALL_PERIODS", + "SampleAfterValue": "500009", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.", + "Counter": "0,1,2,3", + "EventCode": "0x83", + "EventName": "ICACHE_TAG.STALLS", + "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.", + "SampleAfterValue": "200003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "ICACHE_TAG.STALLS_INUSE", + "Counter": "0,1,2,3", + "EventCode": "0x83", + "EventName": "ICACHE_TAG.STALLS_INUSE", + "SampleAfterValue": "200003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "ICACHE_TAG.STALLS_ISB", + "Counter": "0,1,2,3", + "EventCode": "0x83", + "EventName": "ICACHE_TAG.STALLS_ISB", + "SampleAfterValue": "200003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x79", + "EventName": "IDQ.DSB_CYCLES_ANY", + "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) 
path.", + "SampleAfterValue": "2000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles DSB is delivering optimal number of Uops", + "Counter": "0,1,2,3", + "CounterMask": "6", + "EventCode": "0x79", + "EventName": "IDQ.DSB_CYCLES_OK", + "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.", + "SampleAfterValue": "2000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path", + "Counter": "0,1,2,3", + "EventCode": "0x79", + "EventName": "IDQ.DSB_UOPS", + "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.", + "SampleAfterValue": "2000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles MITE is delivering any Uop", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x79", + "EventName": "IDQ.MITE_CYCLES_ANY", + "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles MITE is delivering optimal number of Uops", + "Counter": "0,1,2,3", + "CounterMask": "6", + "EventCode": "0x79", + "EventName": "IDQ.MITE_CYCLES_OK", + "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. 
During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path", + "Counter": "0,1,2,3", + "EventCode": "0x79", + "EventName": "IDQ.MITE_UOPS", + "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x79", + "EventName": "IDQ.MS_CYCLES_ANY", + "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.", + "SampleAfterValue": "2000003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of switches from DSB or MITE to the MS", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0x79", + "EventName": "IDQ.MS_SWITCHES", + "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.", + "SampleAfterValue": "100003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy", + "Counter": "0,1,2,3", + "EventCode": "0x79", + "EventName": "IDQ.MS_UOPS", + "PublicDescription": "Counts the number of uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. 
Counting includes uops that may 'bypass' the IDQ.", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x9c", + "EventName": "IDQ_BUBBLES.CORE", + "PublicDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. The count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "6", + "EventCode": "0x9c", + "EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE", + "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. 
[This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0x9c", + "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK", + "Invert": "1", + "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x9c", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE", + "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "6", + "EventCode": "0x9c", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", + "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. 
[This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0x9c", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK", + "Invert": "1", + "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of cycles that the micro-sequencer is busy.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "MS_DECODED.MS_BUSY", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/memory.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/memory.json new file mode 100644 index 000000000000..7cdd5cb39009 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/memory.json @@ -0,0 +1,499 @@ +[ + { + "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "2", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "6", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x6", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts 
the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.ANY", + "SampleAfterValue": "1000003", + "UMask": "0x7f", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.ANY_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xff", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.L1_BOUND_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xf4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a DL1 miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.L1_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.L1_MISS_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x81", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to other block cases.", + "Counter": "0,1,2,3,4,5,6,7", + 
"EventCode": "0x05", + "EventName": "LD_HEAD.OTHER", + "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to other block cases such as pipeline conflicts, fences, etc.", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.OTHER_AT_RET", + "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.", + "SampleAfterValue": "1000003", + "UMask": "0xc0", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a pagewalk.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.PGWALK", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.PGWALK_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0xa0", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a store address match.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.ST_ADDR", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": 
"0x05", + "EventName": "LD_HEAD.ST_ADDR_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x84", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to request buffers full or lock in progress.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.WCB_FULL", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to request buffers full or lock in progress.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.WCB_FULL_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x82", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.MEMORY_ORDERING", + "SampleAfterValue": "20003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of machine clears due to memory ordering conflicts.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.MEMORY_ORDERING", + "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. 
Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "2", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "3", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "5", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS", + "PublicDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "9", + "EventCode": "0x47", + "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS", + "PublicDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. 
bus lock).", + "SampleAfterValue": "1000003", + "UMask": "0x9", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024", + "MSRIndex": "0x3F6", + "MSRValue": "0x400", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "53", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", + "MSRIndex": "0x3F6", + "MSRValue": "0x80", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "1009", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", + "MSRIndex": "0x3F6", + "MSRValue": "0x10", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency. 
Available PDIST counters: 0", + "SampleAfterValue": "20011", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048", + "MSRIndex": "0x3F6", + "MSRValue": "0x800", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "23", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", + "MSRIndex": "0x3F6", + "MSRValue": "0x100", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "503", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", + "MSRIndex": "0x3F6", + "MSRValue": "0x20", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", + "MSRIndex": "0x3F6", + "MSRValue": "0x4", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", + "MSRIndex": "0x3F6", + "MSRValue": "0x200", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "101", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", + "MSRIndex": "0x3F6", + "MSRValue": "0x40", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency. 
Available PDIST counters: 0", + "SampleAfterValue": "2003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.", + "Counter": "1,2,3,4,5,6,7", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", + "MSRIndex": "0x3F6", + "MSRValue": "0x8", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0", + "SampleAfterValue": "50021", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.", + "Counter": "0", + "Data_LA": "1", + "EventCode": "0xcd", + "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE", + "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. 
The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts misaligned loads that are 4K page splits.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x13", + "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts misaligned stores that are 4K page splits.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x13", + "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT", + "SampleAfterValue": "200003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_CODE_RD.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x184000004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_CODE_RD.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3FBFC00004", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by DRAM.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_DATA_RD.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x184000001", + "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that were supplied by DRAM.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x184000001", + "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_DATA_RD.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3FBFC00001", + "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_DATA_RD.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3FBFC00001", + "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_RFO.DRAM", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x184000002", + "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.DEMAND_RFO.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3FBFC00002", + "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.DEMAND_RFO.L3_MISS", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x3FBFC00002", + "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts demand data read requests that miss the L3 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x21", + "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where data return is pending for a Demand Data Read request who miss L3 cache.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD", + "PublicDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.", + "Counter": "0,1,2,3", + "EventCode": "0x20", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD", + "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. 
Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.", + "SampleAfterValue": "2000003", + "UMask": "0x10", + "Unit": "cpu_core" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/mtl-metrics.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/mtl-metrics.json new file mode 100644 index 000000000000..948c16a1f95b --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/mtl-metrics.json @@ -0,0 +1,2825 @@ +[ + { + "BriefDescription": "C10 residency percent per package", + "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C10_Pkg_Residency", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "C1 residency percent per core", + "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C1_Core_Residency", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "C2 residency percent per package", + "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C2_Pkg_Residency", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "C3 residency percent per package", + "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C3_Pkg_Residency", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "C6 residency percent per core", + "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C6_Core_Residency", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "C6 residency percent per package", + "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C6_Pkg_Residency", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "C7 residency percent per core", + "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C7_Core_Residency", + "ScaleUnit": 
"100%" + }, + { + "BriefDescription": "C8 residency percent per package", + "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@", + "MetricGroup": "Power", + "MetricName": "C8_Pkg_Residency", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "Percentage of cycles spent in System Management Interrupts.", + "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)", + "MetricGroup": "smi", + "MetricName": "smi_cycles", + "MetricThreshold": "smi_cycles > 0.1", + "ScaleUnit": "100%" + }, + { + "BriefDescription": "Number of SMI interrupts.", + "MetricExpr": "msr@smi@", + "MetricGroup": "smi", + "MetricName": "smi_num", + "ScaleUnit": "1SMI#" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions", + "MetricExpr": "tma_core_bound", + "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group", + "MetricName": "tma_allocation_restriction", + "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "Default;TopdownL1;tma_L1_group", + "MetricName": "tma_backend_bound", + "MetricThreshold": "tma_backend_bound > 0.1", + "MetricgroupNoGroup": "TopdownL1;Default", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. 
If a uop is not available (IQ is empty), this event will not count", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "Default;TopdownL1;tma_L1_group", + "MetricName": "tma_bad_speculation", + "MetricThreshold": "tma_bad_speculation > 0.15", + "MetricgroupNoGroup": "TopdownL1;Default", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). 
Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group", + "MetricName": "tma_branch_detect", + "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)", + "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. 
Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts", + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group", + "MetricName": "tma_branch_mispredicts", + "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15", + "MetricgroupNoGroup": "TopdownL2", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group", + "MetricName": "tma_branch_resteer", + "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group", + "MetricName": "tma_cisc", + "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation", + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / (6 * 
cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group", + "MetricName": "tma_core_bound", + "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1", + "MetricgroupNoGroup": "TopdownL2", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group", + "MetricName": "tma_decode", + "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming", + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_fast_nuke", + "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "Default;TopdownL1;tma_L1_group", + "MetricName": "tma_frontend_bound", + "MetricThreshold": "tma_frontend_bound > 0.2", + "MetricgroupNoGroup": "TopdownL1;Default", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.", 
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group", + "MetricName": "tma_icache_misses", + "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group", + "MetricName": "tma_ifetch_bandwidth", + "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2", + "MetricgroupNoGroup": "TopdownL2", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group", + "MetricName": "tma_ifetch_latency", + "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2", + "MetricgroupNoGroup": "TopdownL2", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per Floating Point (FP) Operation", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@FP_FLOPS_RETIRED.ALL@", + "MetricGroup": "Flops", + "MetricName": "tma_info_arith_inst_mix_ipflop", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@FP_INST_RETIRED.128B_DP@ + cpu_atom@FP_INST_RETIRED.128B_SP@)", + "MetricGroup": 
"Flops", + "MetricName": "tma_info_arith_inst_mix_ipfparith_avx128", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@FP_INST_RETIRED.64B_DP@", + "MetricGroup": "Flops", + "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_dp", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@FP_INST_RETIRED.32B_SP@", + "MetricGroup": "Flops", + "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_sp", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss", + "MetricExpr": "100 * (cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ + cpu_atom@LD_HEAD.PGWALK_AT_RET@) / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricGroup": "Ifetch", + "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles", + "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricGroup": "Load_Store_Miss", + "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles", + "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. 
See Info.Load_Miss_Bound", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall", + "MetricExpr": "100 * cpu_atom@LD_HEAD.ANY_AT_RET@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricGroup": "Mem_Exec", + "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles", + "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.ALL_BRANCHES@", + "MetricName": "tma_info_br_inst_mix_ipbranch", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.NEAR_CALL@", + "MetricName": "tma_info_br_inst_mix_ipcall", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.FAR_BRANCH@u", + "MetricName": "tma_info_br_inst_mix_ipfarbranch", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)", + "MetricName": "tma_info_br_inst_mix_ipmisp_cond_ntaken", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.COND_TAKEN@", + "MetricName": "tma_info_br_inst_mix_ipmisp_cond_taken", + 
"Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.INDIRECT@", + "MetricName": "tma_info_br_inst_mix_ipmisp_indirect", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per retired return Branch Misprediction", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.RETURN@", + "MetricName": "tma_info_br_inst_mix_ipmisp_ret", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per retired Branch Misprediction", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@", + "MetricName": "tma_info_br_inst_mix_ipmispredict", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Ratio of all branches which mispredict", + "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / cpu_atom@BR_INST_RETIRED.ALL_BRANCHES@", + "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Ratio between Mispredicted branches and unknown branches", + "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / cpu_atom@BACLEARS.ANY@", + "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_to_unknown_branch_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full", + "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.LD_BUF@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full", + "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.RSV@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that allocation is stalled due to store buffer 
full", + "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.ST_BUF@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Cycles Per Instruction", + "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@ / cpu_atom@INST_RETIRED.ANY@", + "MetricName": "tma_info_core_cpi", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricExpr": "cpu_atom@FP_FLOPS_RETIRED.ALL@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricGroup": "Flops", + "MetricName": "tma_info_core_flopc", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions Per Cycle", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricName": "tma_info_core_ipc", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Uops Per Instruction", + "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL_P@ / cpu_atom@INST_RETIRED.ANY@", + "MetricName": "tma_info_core_upi", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.L2_HIT@ / cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@", + "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss doesn't hit in the L2", + "MetricExpr": "100 * (cpu_atom@MEM_BOUND_STALLS_IFETCH.LLC_HIT@ + cpu_atom@MEM_BOUND_STALLS_IFETCH.LLC_MISS@) / cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@", + "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2miss", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@", + "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit", + 
"Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.LLC_MISS@ / cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@", + "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.L2_HIT@ / cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@", + "MetricGroup": "load_store_bound", + "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses in the L2", + "MetricExpr": "100 * (cpu_atom@MEM_BOUND_STALLS_LOAD.LLC_HIT@ + cpu_atom@MEM_BOUND_STALLS_LOAD.LLC_MISS@) / cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@", + "MetricGroup": "load_store_bound", + "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2miss", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@", + "MetricGroup": "load_store_bound", + "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3", + "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.LLC_MISS@ / cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@", + "MetricGroup": "load_store_bound", + "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at 
retirement due to a pipeline block", + "MetricExpr": "100 * cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricGroup": "load_store_bound", + "MetricName": "tma_info_load_store_bound_l1_bound", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement", + "MetricExpr": "100 * (cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ + cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@) / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricGroup": "load_store_bound", + "MetricName": "tma_info_load_store_bound_load_bound", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full", + "MetricExpr": "100 * (cpu_atom@MEM_SCHEDULER_BLOCK.ST_BUF@ / cpu_atom@MEM_SCHEDULER_BLOCK.ALL@) * tma_mem_scheduler", + "MetricGroup": "load_store_bound", + "MetricName": "tma_info_load_store_bound_store_bound", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to floating point assists", + "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.FP_ASSIST@ / cpu_atom@INST_RETIRED.ANY@", + "MetricName": "tma_info_machine_clear_bound_machine_clears_fp_assist_pki", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to page faults", + "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.PAGE_FAULT@ / cpu_atom@INST_RETIRED.ANY@", + "MetricName": "tma_info_machine_clear_bound_machine_clears_page_fault_pki", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to self-modifying code", + "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.SMC@ / cpu_atom@INST_RETIRED.ANY@", + "MetricName": "tma_info_machine_clear_bound_machine_clears_smc_pki", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of total 
non-speculative loads with an address aliasing block", + "MetricExpr": "100 * cpu_atom@LD_BLOCKS.ADDRESS_ALIAS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@", + "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block", + "MetricExpr": "100 * cpu_atom@LD_BLOCKS.DATA_UNKNOWN@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@", + "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss", + "MetricExpr": "100 * cpu_atom@LD_HEAD.L1_MISS_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@", + "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc", + "MetricExpr": "100 * cpu_atom@LD_HEAD.OTHER_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@", + "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk", + "MetricExpr": "100 * cpu_atom@LD_HEAD.PGWALK_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@", + "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss", + "MetricExpr": "100 * cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@", + "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match", + "MetricExpr": "100 * cpu_atom@LD_HEAD.ST_ADDR_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@", + "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding", + "Unit": 
"cpu_atom" + }, + { + "BriefDescription": "Instructions per Load", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@", + "MetricName": "tma_info_mem_mix_ipload", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instructions per Store", + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@MEM_UOPS_RETIRED.ALL_STORES@", + "MetricName": "tma_info_mem_mix_ipstore", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of total non-speculative loads that perform one or more locks", + "MetricExpr": "100 * cpu_atom@MEM_UOPS_RETIRED.LOCK_LOADS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@", + "MetricName": "tma_info_mem_mix_load_locks_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of total non-speculative loads that are splits", + "MetricExpr": "100 * cpu_atom@MEM_UOPS_RETIRED.SPLIT_LOADS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@", + "MetricName": "tma_info_mem_mix_load_splits_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Ratio of mem load uops to all uops", + "MetricExpr": "1e3 * cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@", + "MetricName": "tma_info_mem_mix_memload_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction", + "MetricExpr": "100 * cpu_atom@SERIALIZATION.C01_MS_SCB@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricName": "tma_info_serialization_%_tpause_cycles", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Average CPU Utilization", + "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_atom@", + "MetricName": "tma_info_system_cpu_utilization", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Giga Floating Point Operations Per Second", + "MetricExpr": "cpu_atom@FP_FLOPS_RETIRED.ALL@ / (duration_time * 1e9)", + "MetricGroup": "Flops", + "MetricName": "tma_info_system_gflops", + "PublicDescription": "Giga Floating Point Operations Per 
Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Fraction of cycles spent in Kernel mode", + "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@k / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_kernel_utilization", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "PerfMon Event Multiplexing accuracy indicator", + "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@ / cpu_atom@CPU_CLK_UNHALTED.CORE@", + "MetricName": "tma_info_system_mux", + "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Average Frequency Utilization relative nominal frequency", + "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@ / cpu_atom@CPU_CLK_UNHALTED.REF_TSC@", + "MetricGroup": "Power", + "MetricName": "tma_info_system_turbo_utilization", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of all uops which are FPDiv uops", + "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.FPDIV@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@", + "MetricName": "tma_info_uop_mix_fpdiv_uop_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of all uops which are IDiv uops", + "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.IDIV@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@", + "MetricName": "tma_info_uop_mix_idiv_uop_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of all uops which are microcode ops", + "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.MS@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@", + "MetricName": "tma_info_uop_mix_microcode_uop_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Percentage of all uops which are x87 uops", + "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.X87@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@", + "MetricName": "tma_info_uop_mix_x87_uop_ratio", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue 
slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB_MISS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group", + "MetricName": "tma_itlb_misses", + "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation", + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group", + "MetricName": "tma_machine_clears", + "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15", + "MetricgroupNoGroup": "TopdownL2", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops", + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", + "MetricName": "tma_mem_scheduler", + "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops", + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": 
"TopdownL3;tma_L3_group;tma_resource_bound_group", + "MetricName": "tma_non_mem_scheduler", + "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)", + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_nuke", + "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group", + "MetricName": "tma_other_fb", + "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.", + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group", + "MetricName": "tma_predecode", + "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)", + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / (6 * 
cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", + "MetricName": "tma_register", + "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)", + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", + "MetricName": "tma_reorder_buffer", + "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation", + "MetricExpr": "tma_backend_bound - tma_core_bound", + "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group", + "MetricName": "tma_resource_bound", + "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1", + "MetricgroupNoGroup": "TopdownL2", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that result in retirement slots", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": "Default;TopdownL1;tma_L1_group", + "MetricName": "tma_retiring", + "MetricThreshold": "tma_retiring > 0.75", + "MetricgroupNoGroup": "TopdownL1;Default", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)", + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)", + "MetricGroup": 
"TopdownL3;tma_L3_group;tma_resource_bound_group", + "MetricName": "tma_serialization", + "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)", + "ScaleUnit": "100%", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Uncore frequency per die [GHZ]", + "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9", + "MetricGroup": "SoC", + "MetricName": "UNCORE_FREQ", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.", + "MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)", + "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", + "MetricName": "tma_alu_op_utilization", + "MetricThreshold": "tma_alu_op_utilization > 0.4", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists", + "MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots", + "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group", + "MetricName": "tma_assists", + "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", + "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. 
Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.", + "MetricExpr": "63 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_avx_assists", + "MetricThreshold": "tma_avx_assists > 0.1", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "cpu_core@topdown\\-be\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)", + "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group", + "MetricName": "tma_backend_bound", + "MetricThreshold": "tma_backend_bound > 0.2", + "MetricgroupNoGroup": "TopdownL1;Default", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. 
Sample with: TOPDOWN.BACKEND_BOUND_SLOTS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)", + "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group", + "MetricName": "tma_bad_speculation", + "MetricThreshold": "tma_bad_speculation > 0.15", + "MetricgroupNoGroup": "TopdownL1;Default", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", + "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", + "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB", + "MetricName": "tma_bottleneck_big_code", + "MetricThreshold": "tma_bottleneck_big_code > 20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA", + "MetricExpr": "100 * ((cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots)", + "MetricGroup": "BvBO;Ret", + "MetricName": "tma_bottleneck_branching_overhead", + 
"MetricThreshold": "tma_bottleneck_branching_overhead > 5", + "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation", + "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))", + "MetricGroup": "BvCB;Cor;tma_issueComp", + "MetricName": "tma_bottleneck_compute_bound_est", + "MetricThreshold": "tma_bottleneck_compute_bound_est > 20", + "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
Related metrics: ", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))", + "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW", + "MetricName": "tma_bottleneck_data_cache_memory_bandwidth", + "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. 
Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks", + "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + 
tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat", + "MetricName": "tma_bottleneck_data_cache_memory_latency", + "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20", + "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)", + "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code", + "MetricGroup": "BvFB;Fed;FetchBW;Frontend", + "MetricName": "tma_bottleneck_instruction_fetch_bw", + "MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of irregular execution (e.g", + "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + 
tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS", + "MetricName": "tma_bottleneck_irregular_overhead", + "MetricThreshold": "tma_bottleneck_irregular_overhead > 10", + "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", + "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", + "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB", + "MetricName": "tma_bottleneck_memory_data_tlbs", + "MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20", + "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). 
Related metrics: tma_dtlb_load, tma_dtlb_store", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)", + "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))", + "MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn", + "MetricName": "tma_bottleneck_memory_synchronization", + "MetricThreshold": "tma_bottleneck_memory_synchronization > 10", + "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", + "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM", + "MetricName": "tma_bottleneck_mispredictions", + "MetricThreshold": "tma_bottleneck_mispredictions > 20", + "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. 
Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end", + "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)", + "MetricGroup": "BvOB;Cor;Offcore", + "MetricName": "tma_bottleneck_other_bottlenecks", + "MetricThreshold": "tma_bottleneck_other_bottlenecks > 20", + "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.", + "MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)", + "MetricGroup": "BvUW;Ret", + "MetricName": "tma_bottleneck_useful_work", + "MetricThreshold": "tma_bottleneck_useful_work > 20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction", + "MetricExpr": "cpu_core@topdown\\-br\\-mispredict@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)", + "MetricGroup": 
"BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM", + "MetricName": "tma_branch_mispredicts", + "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_bottleneck_mispredictions, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers", + "MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches", + "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricName": "tma_branch_resteers", + "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. 
Sample with: BR_MISP_RETIRED.ALL_BRANCHES", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C01@ / tma_info_thread_clks", + "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_c01_wait", + "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C02@ / tma_info_thread_clks", + "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_c02_wait", + "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction", + "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)", + "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group", + "MetricName": "tma_cisc", + "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)", + "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. 
Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears", + "MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks", + "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC", + "MetricName": "tma_clears_resteers", + "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.", + "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.L1I_MISS@ * cpu_core@FRONTEND_RETIRED.L1I_MISS@R / tma_info_thread_clks - tma_code_l2_miss)", + "MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group", + "MetricName": "tma_code_l2_hit", + "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.", + "MetricExpr": "cpu_core@FRONTEND_RETIRED.L2_MISS@ * cpu_core@FRONTEND_RETIRED.L2_MISS@R / tma_info_thread_clks", + "MetricGroup": 
"FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group", + "MetricName": "tma_code_l2_miss", + "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) ITLB was missed by instructions fetches, that later on hit in second-level TLB (STLB)", + "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.ITLB_MISS@ * cpu_core@FRONTEND_RETIRED.ITLB_MISS@R / tma_info_thread_clks - tma_code_stlb_miss)", + "MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group", + "MetricName": "tma_code_stlb_hit", + "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by instruction fetches, performing a hardware page walk", + "MetricExpr": "cpu_core@FRONTEND_RETIRED.STLB_MISS@ * cpu_core@FRONTEND_RETIRED.STLB_MISS@R / tma_info_thread_clks", + "MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group", + "MetricName": "tma_code_stlb_miss", + "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.", + "MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)", + "MetricGroup": 
"FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group", + "MetricName": "tma_code_stlb_miss_2m", + "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.", + "MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)", + "MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group", + "MetricName": "tma_code_stlb_miss_4k", + "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches.", + "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@ * cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@R / tma_info_thread_clks", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_cond_nt_mispredicts", + "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by taken conditional branches.", + "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_COST@ * cpu_core@BR_MISP_RETIRED.COND_TAKEN_COST@R / tma_info_thread_clks", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + 
"MetricName": "tma_cond_tk_mispredicts", + "MetricThreshold": "tma_cond_tk_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", + "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 25 * tma_info_system_core_frequency) * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", + "MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", + "MetricName": "tma_contested_accesses", + "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. 
Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck", + "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)", + "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group", + "MetricName": "tma_core_bound", + "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. 
FP-chained long-latency arithmetic operations).", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 24 * tma_info_system_core_frequency) * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", + "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", + "MetricName": "tma_data_sharing", + "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. 
Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder", + "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2", + "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group", + "MetricName": "tma_decoder0_alone", + "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)", + "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active", + "MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks", + "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group", + "MetricName": "tma_divider", + "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. 
Sample with: ARITH.DIV_ACTIVE", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads", + "MetricExpr": "cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@ / tma_info_thread_clks", + "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricName": "tma_dram_bound", + "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline", + "MetricExpr": "(cpu_core@IDQ.DSB_CYCLES_ANY@ - cpu_core@IDQ.DSB_CYCLES_OK@) / tma_info_core_core_clks / 2", + "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", + "MetricName": "tma_dsb", + "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2", + "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines", + "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks", + "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", + "MetricName": "tma_dsb_switches", + "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. 
Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses", + "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@R, 7) / tma_info_thread_clks + tma_load_stlb_miss", + "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group", + "MetricName": "tma_dtlb_load", + "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. 
Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses", + "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@R, 7) / tma_info_thread_clks + tma_store_stlb_miss", + "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group", + "MetricName": "tma_dtlb_store", + "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing", + "MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks", + "MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group", + "MetricName": "tma_false_sharing", + "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. 
False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed", + "MetricExpr": "cpu_core@L1D_PEND_MISS.FB_FULL@ / tma_info_thread_clks", + "MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", + "MetricName": "tma_fb_full", + "MetricThreshold": "tma_fb_full > 0.3", + "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues", + "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)", + "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB", + "MetricName": "tma_fetch_bandwidth", + "MetricThreshold": "tma_fetch_bandwidth > 0.2", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. 
For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues", + "MetricExpr": "cpu_core@topdown\\-fetch\\-lat@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots", + "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group", + "MetricName": "tma_fetch_latency", + "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. 
Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that are decoded into two or more uops", + "MetricExpr": "max(0, tma_heavy_operations - tma_microcode_sequencer)", + "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0", + "MetricName": "tma_few_uops_instructions", + "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that are decoded into two or more uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)", + "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector", + "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricName": "tma_fp_arith", + "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6", + "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). 
Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists", + "MetricExpr": "30 * cpu_core@ASSISTS.FP@ / tma_info_thread_slots", + "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_fp_assists", + "MetricThreshold": "tma_fp_assists > 0.1", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.", + "MetricExpr": "cpu_core@ARITH.FPDIV_ACTIVE@ / tma_info_thread_clks", + "MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group", + "MetricName": "tma_fp_divider", + "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired", + "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", + "MetricName": "tma_fp_scalar", + "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. 
Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths", + "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P", + "MetricName": "tma_fp_vector", + "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", + "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors", + "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_128b", + "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors", + "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P", + "MetricName": "tma_fp_vector_256b", + "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))", + "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "cpu_core@topdown\\-fe\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots", + "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group", + "MetricName": "tma_frontend_bound", + "MetricThreshold": "tma_frontend_bound > 0.15", + "MetricgroupNoGroup": "TopdownL1;Default", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. 
Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions", + "MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricName": "tma_fused_instructions", + "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. 
{([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences", + "MetricExpr": "cpu_core@topdown\\-heavy\\-ops@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)", + "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group", + "MetricName": "tma_heavy_operations", + "MetricThreshold": "tma_heavy_operations > 0.1", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", + "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks", + "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricName": "tma_icache_misses", + "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. 
Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions.", + "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R / tma_info_thread_clks", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_ind_call_mispredicts", + "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions.", + "MetricExpr": "max((cpu_core@BR_MISP_RETIRED.INDIRECT_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_COST@R - cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R) / tma_info_thread_clks, 0)", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_ind_jump_mispredicts", + "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", + "MetricExpr": "tma_bottleneck_mispredictions * tma_info_thread_slots / 6 / cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / 100", + "MetricGroup": "Bad;BrMispredicts;tma_issueBM", + "MetricName": "tma_info_bad_spec_branch_misprediction_cost", + "PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). 
Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_NTAKEN@", + "MetricGroup": "Bad;BrMispredicts", + "MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken", + "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN@", + "MetricGroup": "Bad;BrMispredicts", + "MetricName": "tma_info_bad_spec_ipmisp_cond_taken", + "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.INDIRECT@", + "MetricGroup": "Bad;BrMispredicts", + "MetricName": "tma_info_bad_spec_ipmisp_indirect", + "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.RET@", + "MetricGroup": "Bad;BrMispredicts", + "MetricName": "tma_info_bad_spec_ipmisp_ret", + "MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@", + "MetricGroup": 
"Bad;BadSpec;BrMispredicts", + "MetricName": "tma_info_bad_spec_ipmispredict", + "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Speculative to Retired ratio of all clears (covering Mispredicts and nukes)", + "MetricExpr": "cpu_core@INT_MISC.CLEARS_COUNT@ / (cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ + cpu_core@MACHINE_CLEARS.COUNT@)", + "MetricGroup": "BrMispredicts", + "MetricName": "tma_info_bad_spec_spec_clears_ratio", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", + "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", + "MetricGroup": "Cor;SMT", + "MetricName": "tma_info_botlnk_l0_core_bound_likely", + "MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck", + "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))", + "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB", + "MetricName": "tma_info_botlnk_l2_dsb_bandwidth", + "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10", + "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck", + "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))", + "MetricGroup": "DSBmiss;Fed;tma_issueFB", + "MetricName": "tma_info_botlnk_l2_dsb_misses", + "MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10", + "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck", + "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", + "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL", + "MetricName": "tma_info_botlnk_l2_ic_misses", + "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5", + "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. 
Related metrics: ", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of branches that are CALL or RET", + "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@BR_INST_RETIRED.NEAR_RETURN@) / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@", + "MetricGroup": "Bad;Branches", + "MetricName": "tma_info_branches_callret", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of branches that are non-taken conditionals", + "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_NTAKEN@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@", + "MetricGroup": "Bad;Branches;CodeGen;PGO", + "MetricName": "tma_info_branches_cond_nt", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of branches that are taken conditionals", + "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_TAKEN@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@", + "MetricGroup": "Bad;Branches;CodeGen;PGO", + "MetricName": "tma_info_branches_cond_tk", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps", + "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_TAKEN@ - cpu_core@BR_INST_RETIRED.COND_TAKEN@ - 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@) / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@", + "MetricGroup": "Bad;Branches", + "MetricName": "tma_info_branches_jump", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)", + "MetricExpr": "1 - (tma_info_branches_cond_nt + tma_info_branches_cond_tk + tma_info_branches_callret + tma_info_branches_jump)", + "MetricGroup": "Bad;Branches", + "MetricName": "tma_info_branches_other_branches", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core", + "MetricExpr": "(cpu_core@CPU_CLK_UNHALTED.DISTRIBUTED@ if #SMT_on else tma_info_thread_clks)", + "MetricGroup": "SMT", + "MetricName": "tma_info_core_core_clks", + "Unit": "cpu_core" + }, + 
{ + "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_core_core_clks", + "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group", + "MetricName": "tma_info_core_coreipc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "uops Executed per Cycle", + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / tma_info_thread_clks", + "MetricGroup": "Power", + "MetricName": "tma_info_core_epc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / tma_info_core_core_clks", + "MetricGroup": "Flops;Ret", + "MetricName": "tma_info_core_flopc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)", + "MetricExpr": "(cpu_core@FP_ARITH_DISPATCHED.PORT_0@ + cpu_core@FP_ARITH_DISPATCHED.PORT_1@ + cpu_core@FP_ARITH_DISPATCHED.PORT_5@) / (2 * tma_info_core_core_clks)", + "MetricGroup": "Cor;Flops;HPC", + "MetricName": "tma_info_core_fp_arith_utilization", + "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). 
Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)", + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@", + "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", + "MetricName": "tma_info_core_ilp", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@UOPS_ISSUED.ANY@", + "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB", + "MetricName": "tma_info_frontend_dsb_coverage", + "MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35", + "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.", + "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@", + "MetricGroup": "DSBmiss", + "MetricName": "tma_info_frontend_dsb_switch_cost", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired DSB misses", + "MetricExpr": "cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@ * cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@R / tma_info_thread_clks", + "MetricGroup": "DSBmiss;Fed;FetchLat", + "MetricName": "tma_info_frontend_dsb_switches_ret", + "MetricThreshold": "tma_info_frontend_dsb_switches_ret > 0.05", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of Uops issued by front-end when it issued something", + "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@", + "MetricGroup": "Fed;FetchBW", + "MetricName": "tma_info_frontend_fetch_upc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average Latency for L1 instruction cache misses", + "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / cpu_core@ICACHE_DATA.STALL_PERIODS@", + "MetricGroup": "Fed;FetchLat;IcMiss", + "MetricName": "tma_info_frontend_icache_miss_latency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@", + "MetricGroup": "DSBmiss;Fed", + "MetricName": "tma_info_frontend_ipdsb_miss_ret", + "MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions 
per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / cpu_core@BACLEARS.ANY@", + "MetricGroup": "Fed", + "MetricName": "tma_info_frontend_ipunknown_branch", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache true code cacheline misses per kilo instruction", + "MetricExpr": "1e3 * cpu_core@FRONTEND_RETIRED.L2_MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "IcMiss", + "MetricName": "tma_info_frontend_l2mpki_code", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction", + "MetricExpr": "1e3 * cpu_core@L2_RQSTS.CODE_RD_MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "IcMiss", + "MetricName": "tma_info_frontend_l2mpki_code_all", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)", + "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@UOPS_ISSUED.ANY@", + "MetricGroup": "Fed;LSD", + "MetricName": "tma_info_frontend_lsd_coverage", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired operations that invoke the Microcode Sequencer", + "MetricExpr": "cpu_core@FRONTEND_RETIRED.MS_FLOWS@ * cpu_core@FRONTEND_RETIRED.MS_FLOWS@R / tma_info_thread_clks", + "MetricGroup": "Fed;FetchLat;MicroSeq", + "MetricName": "tma_info_frontend_ms_latency_ret", + "MetricThreshold": "tma_info_frontend_ms_latency_ret > 0.05", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Taken Branches retired Per Cycle", + "MetricExpr": "cpu_core@BR_INST_RETIRED.NEAR_TAKEN@ / tma_info_thread_clks", + "MetricGroup": "Branches;FetchBW", + "MetricName": "tma_info_frontend_tbpc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection", + "MetricExpr": 
"cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@", + "MetricGroup": "Fed", + "MetricName": "tma_info_frontend_unknown_branch_cost", + "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired branches who got branch address clears", + "MetricExpr": "cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@ * cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@R / tma_info_thread_clks", + "MetricGroup": "Fed;FetchLat", + "MetricName": "tma_info_frontend_unknown_branches_ret", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Branch instructions per taken branch.", + "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@", + "MetricGroup": "Branches;Fed;PGO", + "MetricName": "tma_info_inst_mix_bptkbranch", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Summary;TmaL1;tma_L1_group", + "MetricName": "tma_info_inst_mix_instructions", + "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + cpu_core@FP_ARITH_INST_RETIRED.VECTOR@)", + "MetricGroup": "Flops;InsType", + "MetricName": "tma_info_inst_mix_iparith", + "MetricThreshold": "tma_info_inst_mix_iparith < 10", + "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. 
Approximated prior to BDW.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_iparith_avx128", + "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", + "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_iparith_avx256", + "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", + "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@", + "MetricGroup": "Flops;FpScalar;InsType", + "MetricName": "tma_info_inst_mix_iparith_scalar_dp", + "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", + "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@", + "MetricGroup": "Flops;FpScalar;InsType", + "MetricName": "tma_info_inst_mix_iparith_scalar_sp", + "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", + "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@", + "MetricGroup": "Branches;Fed;InsType", + "MetricName": "tma_info_inst_mix_ipbranch", + "MetricThreshold": "tma_info_inst_mix_ipbranch < 8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_CALL@", + "MetricGroup": "Branches;Fed;PGO", + "MetricName": "tma_info_inst_mix_ipcall", + "MetricThreshold": "tma_info_inst_mix_ipcall < 200", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", + "MetricGroup": "Flops;InsType", + "MetricName": "tma_info_inst_mix_ipflop", + "MetricThreshold": "tma_info_inst_mix_ipflop < 10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per Load (lower 
number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@MEM_INST_RETIRED.ALL_LOADS@", + "MetricGroup": "InsType", + "MetricName": "tma_info_inst_mix_ipload", + "MetricThreshold": "tma_info_inst_mix_ipload < 3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)", + "MetricExpr": "tma_info_inst_mix_instructions / cpu_core@CPU_CLK_UNHALTED.PAUSE_INST@", + "MetricGroup": "Flops;FpVector;InsType", + "MetricName": "tma_info_inst_mix_ippause", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@", + "MetricGroup": "InsType", + "MetricName": "tma_info_inst_mix_ipstore", + "MetricThreshold": "tma_info_inst_mix_ipstore < 8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@SW_PREFETCH_ACCESS.ANY@", + "MetricGroup": "Prefetches", + "MetricName": "tma_info_inst_mix_ipswpf", + "MetricThreshold": "tma_info_inst_mix_ipswpf < 100", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per taken branch", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@", + "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB", + "MetricName": "tma_info_inst_mix_iptb", + "MetricThreshold": "tma_info_inst_mix_iptb < 13", + "PublicDescription": "Instructions per taken branch. 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "tma_info_memory_l1d_cache_fill_bw", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "tma_info_memory_l2_cache_fill_bw", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "tma_info_memory_l3_cache_access_bw", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "tma_info_memory_l3_cache_fill_bw", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)", + "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheHits;Mem", + "MetricName": "tma_info_memory_fb_hpki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]", + "MetricExpr": "64 * cpu_core@L1D.REPLACEMENT@ / 1e9 / tma_info_system_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l1d_cache_fill_bw", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand 
loads", + "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheHits;Mem", + "MetricName": "tma_info_memory_l1mpki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)", + "MetricExpr": "1e3 * cpu_core@L2_RQSTS.ALL_DEMAND_DATA_RD@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheHits;Mem", + "MetricName": "tma_info_memory_l1mpki_load", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]", + "MetricExpr": "64 * cpu_core@L2_LINES_IN.ALL@ / 1e9 / tma_info_system_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l2_cache_fill_bw", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricExpr": "1e3 * (cpu_core@L2_RQSTS.REFERENCES@ - cpu_core@L2_RQSTS.MISS@) / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheHits;Mem", + "MetricName": "tma_info_memory_l2hpki_all", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)", + "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_HIT@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheHits;Mem", + "MetricName": "tma_info_memory_l2hpki_load", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L2_MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Backend;CacheHits;Mem", + "MetricName": "tma_info_memory_l2mpki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)", + "MetricExpr": "1e3 * cpu_core@L2_RQSTS.MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheHits;Mem;Offcore", + "MetricName": 
"tma_info_memory_l2mpki_all", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)", + "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheHits;Mem", + "MetricName": "tma_info_memory_l2mpki_load", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs", + "MetricExpr": "1e3 * cpu_core@L2_RQSTS.RFO_MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "CacheMisses;Offcore", + "MetricName": "tma_info_memory_l2mpki_rfo", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * cpu_core@OFFCORE_REQUESTS.ALL_REQUESTS@ / 1e9 / tma_info_system_time", + "MetricGroup": "Mem;MemoryBW;Offcore", + "MetricName": "tma_info_memory_l3_cache_access_bw", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]", + "MetricExpr": "64 * cpu_core@LONGEST_LAT_CACHE.MISS@ / 1e9 / tma_info_system_time", + "MetricGroup": "Mem;MemoryBW", + "MetricName": "tma_info_memory_l3_cache_fill_bw", + "Unit": "cpu_core" + }, + { + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L3_MISS@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_l3mpki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average Parallel L2 cache miss data reads", + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_data_l2_mlp", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average Latency for L2 cache miss demand Loads", + "MetricExpr": 
"cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS.DEMAND_DATA_RD@", + "MetricGroup": "LockCont;Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_miss_latency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average Parallel L2 cache miss demand Loads", + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", + "MetricGroup": "Memory_BW;Offcore", + "MetricName": "tma_info_memory_latency_load_l2_mlp", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average Latency for L3 cache miss demand Loads", + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD@", + "MetricGroup": "Memory_Lat;Offcore", + "MetricName": "tma_info_memory_latency_load_l3_miss_latency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", + "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / cpu_core@MEM_LOAD_COMPLETED.L1_MISS_ANY@", + "MetricGroup": "Mem;MemoryBound;MemoryLat", + "MetricName": "tma_info_memory_load_miss_real_latency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "\"Bus lock\" per kilo instruction", + "MetricExpr": "1e3 * cpu_core@SQ_MISC.BUS_LOCK@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_bus_lock_pki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Un-cacheable retired load per kilo instruction", + "MetricExpr": "1e3 * cpu_core@MEM_LOAD_MISC_RETIRED.UC@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Mem", + "MetricName": "tma_info_memory_mix_uc_load_pki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", + "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / cpu_core@L1D_PEND_MISS.PENDING_CYCLES@", + "MetricGroup": 
"Mem;MemoryBW;MemoryBound", + "MetricName": "tma_info_memory_mlp", + "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Rate of L2 HW prefetched lines that were not used by demand accesses", + "MetricExpr": "cpu_core@L2_LINES_OUT.USELESS_HWPF@ / (cpu_core@L2_LINES_OUT.SILENT@ + cpu_core@L2_LINES_OUT.NON_SILENT@)", + "MetricGroup": "Prefetches", + "MetricName": "tma_info_memory_prefetches_useless_hwpf", + "MetricThreshold": "tma_info_memory_prefetches_useless_hwpf > 0.15", + "Unit": "cpu_core" + }, + { + "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "1e3 * cpu_core@ITLB_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Fed;MemoryTLB", + "MetricName": "tma_info_memory_tlb_code_stlb_mpki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand loads", + "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@R / tma_info_thread_clks", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_tlb_load_stlb_miss_ret", + "MetricThreshold": "tma_info_memory_tlb_load_stlb_miss_ret > 0.05", + "Unit": "cpu_core" + }, + { + "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "1e3 * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_tlb_load_stlb_mpki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricExpr": 
"(cpu_core@ITLB_MISSES.WALK_PENDING@ + cpu_core@DTLB_LOAD_MISSES.WALK_PENDING@ + cpu_core@DTLB_STORE_MISSES.WALK_PENDING@) / (4 * tma_info_core_core_clks)", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_tlb_page_walks_utilization", + "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand stores", + "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@R / tma_info_thread_clks", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_tlb_store_stlb_miss_ret", + "MetricThreshold": "tma_info_memory_tlb_store_stlb_miss_ret > 0.05", + "Unit": "cpu_core" + }, + { + "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)", + "MetricExpr": "1e3 * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Mem;MemoryTLB", + "MetricName": "tma_info_memory_tlb_store_stlb_mpki", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mem;Backend;CacheHits", + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@)", + "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", + "MetricName": "tma_info_pipeline_execute", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of uops fetched from DSB per cycle", + "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@IDQ.DSB_CYCLES_ANY@", + "MetricGroup": "Fed;FetchBW", + "MetricName": "tma_info_pipeline_fetch_dsb", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of uops fetched from LSD per cycle", + "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@LSD.CYCLES_ACTIVE@", + "MetricGroup": "Fed;FetchBW", + "MetricName": "tma_info_pipeline_fetch_lsd", + 
"Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of uops fetched from MITE per cycle", + "MetricExpr": "cpu_core@IDQ.MITE_UOPS@ / cpu_core@IDQ.MITE_CYCLES_ANY@", + "MetricGroup": "Fed;FetchBW", + "MetricName": "tma_info_pipeline_fetch_mite", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per a microcode Assist invocation", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY@", + "MetricGroup": "MicroSeq;Pipeline;Ret;Retire", + "MetricName": "tma_info_pipeline_ipassist", + "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", + "PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.", + "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@", + "MetricGroup": "Pipeline;Ret", + "MetricName": "tma_info_pipeline_retire", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions", + "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@", + "MetricGroup": "MicroSeq;Pipeline;Ret", + "MetricName": "tma_info_pipeline_strings_cycles", + "MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C0_WAIT@ / tma_info_thread_clks", + "MetricGroup": "C0Wait", + "MetricName": "tma_info_system_c0_wait", + "MetricThreshold": "tma_info_system_c0_wait > 0.05", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]", + "MetricExpr": 
"tma_info_system_turbo_utilization * msr@tsc\\,cpu=cpu_core@ / 1e9 / tma_info_system_time", + "MetricGroup": "Power;Summary", + "MetricName": "tma_info_system_core_frequency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average CPU Utilization (percentage)", + "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online", + "MetricGroup": "HPC;Summary", + "MetricName": "tma_info_system_cpu_utilization", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of utilized CPUs", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_core@", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_cpus_utilized", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricExpr": "64 * (UNC_HAC_ARB_TRK_REQUESTS.ALL + UNC_HAC_ARB_COH_TRK_REQUESTS.ALL) / 1e9 / tma_info_system_time", + "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW", + "MetricName": "tma_info_system_dram_bw_use", + "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Giga Floating Point Operations Per Second", + "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / 1e9 / tma_info_system_time", + "MetricGroup": "Cor;Flops;HPC", + "MetricName": "tma_info_system_gflops", + "PublicDescription": "Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u", + "MetricGroup": "Branches;OS", + "MetricName": "tma_info_system_ipfarbranch", + "MetricThreshold": "tma_info_system_ipfarbranch < 1e6", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@INST_RETIRED.ANY_P@k", + "MetricGroup": "OS", + "MetricName": "tma_info_system_kernel_cpi", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@CPU_CLK_UNHALTED.THREAD@", + "MetricGroup": "OS", + "MetricName": "tma_info_system_kernel_utilization", + "MetricThreshold": "tma_info_system_kernel_utilization > 0.05", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average number of parallel data read requests to external memory", + "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@", + "MetricGroup": "Mem;MemoryBW;SoC", + "MetricName": "tma_info_system_mem_parallel_reads", + "PublicDescription": "Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches", + "Unit": "cpu_core" + }, + { + "BriefDescription": "PerfMon Event Multiplexing accuracy indicator", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@ / cpu_core@CPU_CLK_UNHALTED.THREAD@", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_mux", + "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total package Power in Watts", + "MetricExpr": "power@energy\\-pkg@ * 61 / (tma_info_system_time * 1e6)", + "MetricGroup": "Power;SoC", + "MetricName": "tma_info_system_power", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active", + "MetricExpr": "(1 - cpu_core@CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE@ / cpu_core@CPU_CLK_UNHALTED.REF_DISTRIBUTED@ if #SMT_on else 0)", + "MetricGroup": "SMT", + "MetricName": "tma_info_system_smt_2t_utilization", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricExpr": "UNC_CLOCK.SOCKET", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_socket_clks", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Run duration time in seconds", + "MetricExpr": "duration_time", + "MetricGroup": "Summary", + "MetricName": "tma_info_system_time", + "MetricThreshold": "tma_info_system_time < 1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Average Frequency Utilization relative nominal frequency", + "MetricExpr": "tma_info_thread_clks / cpu_core@CPU_CLK_UNHALTED.REF_TSC@", + "MetricGroup": "Power", + "MetricName": "tma_info_system_turbo_utilization", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]", + "MetricExpr": "tma_info_system_socket_clks / 1e9 / tma_info_system_time", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_uncore_frequency", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Per-Logical 
Processor actual clocks when the Logical Processor is active.", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD@", + "MetricGroup": "Pipeline", + "MetricName": "tma_info_thread_clks", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles Per Instruction (per Logical Processor)", + "MetricExpr": "1 / tma_info_thread_ipc", + "MetricGroup": "Mem;Pipeline", + "MetricName": "tma_info_thread_cpi", + "Unit": "cpu_core" + }, + { + "BriefDescription": "The ratio of Executed- by Issued-Uops", + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_ISSUED.ANY@", + "MetricGroup": "Cor;Pipeline", + "MetricName": "tma_info_thread_execute_per_issue", + "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Instructions Per Cycle (per Logical Processor)", + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_thread_clks", + "MetricGroup": "Ret;Summary", + "MetricName": "tma_info_thread_ipc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)", + "MetricExpr": "cpu_core@TOPDOWN.SLOTS@", + "MetricGroup": "TmaL1;tma_L1_group", + "MetricName": "tma_info_thread_slots", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor", + "MetricExpr": "(tma_info_thread_slots / (cpu_core@TOPDOWN.SLOTS@ / 2) if #SMT_on else 1)", + "MetricGroup": "SMT;TmaL1;tma_L1_group", + "MetricName": "tma_info_thread_slots_utilization", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops Per Instruction", + "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@INST_RETIRED.ANY@", + "MetricGroup": "Pipeline;Ret;Retire", + "MetricName": "tma_info_thread_uoppi", + "MetricThreshold": "tma_info_thread_uoppi > 1.05", + "Unit": "cpu_core" + }, + { + 
"BriefDescription": "Uops per taken branch", + "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@", + "MetricGroup": "Branches;Fed;FetchBW", + "MetricName": "tma_info_thread_uptb", + "MetricThreshold": "tma_info_thread_uptb < 9", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.", + "MetricExpr": "tma_divider - tma_fp_divider", + "MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group", + "MetricName": "tma_int_divider", + "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired)", + "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b", + "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricName": "tma_int_operations", + "MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6", + "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. 
Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired", + "MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@) / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P", + "MetricName": "tma_int_vector_128b", + "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", + "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired", + "MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P", + "MetricName": "tma_int_vector_256b", + "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", + "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", + "MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks", + "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", + "MetricName": "tma_itlb_misses", + "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache", + "MetricExpr": "max((cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@) / tma_info_thread_clks, 0)", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group", + "MetricName": "tma_l1_bound", + "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. 
Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache", + "MetricExpr": "min(2 * (cpu_core@MEM_INST_RETIRED.ALL_LOADS@ - cpu_core@MEM_LOAD_RETIRED.FB_HIT@ - cpu_core@MEM_LOAD_RETIRED.L1_MISS@) * 20 / 100, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks", + "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group", + "MetricName": "tma_l1_latency_dependency", + "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", + "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks", + "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricName": "tma_l2_bound", + "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_RETIRED.L2_HIT", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited)", + "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L2_HIT@R, 3 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", + "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group", + "MetricName": "tma_l2_hit_latency", + "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", + "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks", + "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricName": "tma_l3_bound", + "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. 
Sample with: MEM_LOAD_RETIRED.L3_HIT_PS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)", + "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@R, 9 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", + "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group", + "MetricName": "tma_l3_hit_latency", + "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", + "MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks", + "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", + "MetricName": "tma_lcp", + "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", + "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. 
Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)", + "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)", + "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group", + "MetricName": "tma_light_operations", + "MetricThreshold": "tma_light_operations > 0.6", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations", + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_2_3_10@ / (3 * tma_info_core_core_clks)", + "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", + "MetricName": "tma_load_op_utilization", + "MetricThreshold": "tma_load_op_utilization > 0.6", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. 
Sample with: UOPS_DISPATCHED.PORT_2_3_10", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)", + "MetricExpr": "max(0, tma_dtlb_load - tma_load_stlb_miss)", + "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group", + "MetricName": "tma_load_stlb_hit", + "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk", + "MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks", + "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group", + "MetricName": "tma_load_stlb_miss", + "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.", + "MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)", + "MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group", + "MetricName": "tma_load_stlb_miss_1g", + "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric 
estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.", + "MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)", + "MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group", + "MetricName": "tma_load_stlb_miss_2m", + "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.", + "MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)", + "MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group", + "MetricName": "tma_load_stlb_miss_4k", + "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@MEM_INST_RETIRED.LOCK_LOADS@R / tma_info_thread_clks", + "MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group", + "MetricName": "tma_lock_latency", + "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & 
tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit", + "MetricExpr": "(cpu_core@LSD.CYCLES_ACTIVE@ - cpu_core@LSD.CYCLES_OK@) / tma_info_core_core_clks / 2", + "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", + "MetricName": "tma_lsd", + "MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2", + "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears", + "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)", + "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn", + "MetricName": "tma_machine_clears", + "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. 
For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)", + "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=4@) / tma_info_thread_clks", + "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW", + "MetricName": "tma_mem_bandwidth", + "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). 
Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)", + "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth", + "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat", + "MetricName": "tma_mem_latency", + "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck", + "MetricExpr": "cpu_core@topdown\\-mem\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)", + "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group", + "MetricName": "tma_memory_bound", + "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2", + "MetricgroupNoGroup": "TopdownL2", + "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. 
This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.", + "MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_memory_fence", + "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.", + "MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricName": "tma_memory_operations", + "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit", + "MetricExpr": "cpu_core@UOPS_RETIRED.MS@ / tma_info_thread_slots", + "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", + "MetricName": "tma_microcode_sequencer", + "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", + "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. 
The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage", + "MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks", + "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM", + "MetricName": "tma_mispredicts_resteers", + "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. 
Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)", + "MetricExpr": "(cpu_core@IDQ.MITE_CYCLES_ANY@ - cpu_core@IDQ.MITE_CYCLES_OK@) / tma_info_core_core_clks / 2", + "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", + "MetricName": "tma_mite", + "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2", + "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)", + "MetricExpr": "160 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_clks", + "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group", + "MetricName": "tma_mixing_vectors", + "MetricThreshold": "tma_mixing_vectors > 0.05", + "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. 
Related metrics: tma_ms_switches", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.", + "MetricExpr": "max(cpu_core@IDQ.MS_CYCLES_ANY@, cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@)) / tma_info_core_core_clks / 2.4", + "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", + "MetricName": "tma_ms", + "MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)", + "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks", + "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO", + "MetricName": "tma_ms_switches", + "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", + "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. 
Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused", + "MetricExpr": "tma_light_operations * (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ - cpu_core@INST_RETIRED.MACRO_FUSED@) / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricName": "tma_non_fused_branches", + "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions", + "MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", + "MetricName": "tma_nop_instructions", + "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. 
Sample with: INST_RETIRED.NOP", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", + "MetricConstraint": "NO_GROUP_EVENTS", + "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))", + "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", + "MetricName": "tma_other_light_ops", + "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6", + "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).", + "MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)", + "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_other_mispredicts", + "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.", + "MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)", + "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group", + "MetricName": "tma_other_nukes", + "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 
0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults", + "MetricExpr": "99 * cpu_core@ASSISTS.PAGE_FAULT@ / tma_info_thread_slots", + "MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group", + "MetricName": "tma_page_faults", + "MetricThreshold": "tma_page_faults > 0.05", + "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)", + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_0@ / tma_info_core_core_clks", + "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", + "MetricName": "tma_port_0", + "MetricThreshold": "tma_port_0 > 0.6", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)", + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_1@ / tma_info_core_core_clks", + "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", + "MetricName": "tma_port_1", + "MetricThreshold": "tma_port_1 > 0.6", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)", + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_6@ / tma_info_core_core_clks", + "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", + "MetricName": "tma_port_6", + "MetricThreshold": "tma_port_6 > 0.6", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)", + "MetricConstraint": "NO_GROUP_EVENTS_NMI", + "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@) / tma_info_thread_clks)", + "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group", + "MetricName": "tma_ports_utilization", + "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. 
For example; when there are too many multiply operations.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)", + "MetricConstraint": "NO_THRESHOLD_AND_NMI", + "MetricExpr": "max(cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ - cpu_core@RESOURCE_STALLS.SCOREBOARD@, 0) / tma_info_thread_clks", + "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", + "MetricName": "tma_ports_utilized_0", + "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", + "MetricConstraint": "NO_THRESHOLD_AND_NMI", + "MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks", + "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group", + "MetricName": "tma_ports_utilized_1", + "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. 
In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", + "MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks", + "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group", + "MetricName": "tma_ports_utilized_2", + "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. 
Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", + "MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks", + "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", + "MetricName": "tma_ports_utilized_3m", + "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions.", + "MetricExpr": "cpu_core@BR_MISP_RETIRED.RET_COST@ * cpu_core@BR_MISP_RETIRED.RET_COST@R / tma_info_thread_clks", + "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group", + "MetricName": "tma_ret_mispredicts", + "MetricThreshold": "tma_ret_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. 
issued uops that eventually get retired", + "DefaultMetricgroupName": "TopdownL1", + "MetricExpr": "cpu_core@topdown\\-retiring@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)", + "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group", + "MetricName": "tma_retiring", + "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1", + "MetricgroupNoGroup": "TopdownL1;Default", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", + "MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks + tma_c02_wait", + "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO", + "MetricName": "tma_serializing_operation", + "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. 
Related metrics: tma_ms_switches", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer)", + "MetricExpr": "tma_light_operations * cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)", + "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group", + "MetricName": "tma_shuffles_256b", + "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)", + "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks", + "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group", + "MetricName": "tma_slow_pause", + "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. 
Sample with: CPU_CLK_UNHALTED.PAUSE_INST", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary", + "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@R, tma_info_memory_load_miss_real_latency) / tma_info_thread_clks", + "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", + "MetricName": "tma_split_loads", + "MetricThreshold": "tma_split_loads > 0.3", + "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents rate of split store accesses", + "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@R, 1) / tma_info_thread_clks", + "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group", + "MetricName": "tma_split_stores", + "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. 
Related metrics: tma_port_4", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)", + "MetricExpr": "(cpu_core@XQ.FULL_CYCLES@ + cpu_core@L1D_PEND_MISS.L2_STALLS@) / tma_info_thread_clks", + "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group", + "MetricName": "tma_sq_full", + "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write", + "MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks", + "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", + "MetricName": "tma_store_bound", + "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", + "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. 
Sample with: MEM_INST_RETIRED.ALL_STORES_PS", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores", + "MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks", + "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", + "MetricName": "tma_store_fwd_blk", + "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. 
For example; when the prior store is writing a smaller region than the load is reading.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses", + "MetricExpr": "(cpu_core@MEM_STORE_RETIRED.L2_HIT@ * 10 * (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) + (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) * min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@)) / tma_info_thread_clks", + "MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group", + "MetricName": "tma_store_latency", + "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations", + "MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_4_9@ + cpu_core@UOPS_DISPATCHED.PORT_7_8@) / (4 * tma_info_core_core_clks)", + "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", + "MetricName": "tma_store_op_utilization", + "MetricThreshold": "tma_store_op_utilization > 0.6", + "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. 
Sample with: UOPS_DISPATCHED.PORT_7_8", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)", + "MetricExpr": "max(0, tma_dtlb_store - tma_store_stlb_miss)", + "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group", + "MetricName": "tma_store_stlb_hit", + "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk", + "MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_core_core_clks", + "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group", + "MetricName": "tma_store_stlb_miss", + "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.", + "MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)", + "MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group", + "MetricName": "tma_store_stlb_miss_1g", + "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric 
estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.", + "MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)", + "MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group", + "MetricName": "tma_store_stlb_miss_2m", + "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.", + "MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)", + "MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group", + "MetricName": "tma_store_stlb_miss_4k", + "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores", + "MetricExpr": "9 * cpu_core@OCR.STREAMING_WR.ANY_RESPONSE@ / tma_info_thread_clks", + "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group", + "MetricName": "tma_streaming_stores", + "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & 
(tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", + "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", + "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks", + "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", + "MetricName": "tma_unknown_branches", + "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))", + "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH", + "ScaleUnit": "100%", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This metric serves as an approximation of legacy x87 usage", + "MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / cpu_core@UOPS_EXECUTED.THREAD@", + "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group", + "MetricName": "tma_x87_use", + "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)", + "PublicDescription": "This metric serves as an approximation of legacy x87 usage. 
It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.", + "ScaleUnit": "100%", + "Unit": "cpu_core" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/other.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/other.json new file mode 100644 index 000000000000..8320ffd83c51 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/other.json @@ -0,0 +1,90 @@ +[ + { + "BriefDescription": "Count all other hardware assists or traps that are not necessarily architecturally exposed (through a software handler) beyond FP; SSE-AVX mix and A/D assists who are counted by dedicated sub-events. the event also counts for Machine Ordering count.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.HARDWARE", + "PublicDescription": "Count all other hardware assists or traps that are not necessarily architecturally exposed (through a software handler) beyond FP; SSE-AVX mix and A/D assists who are counted by dedicated sub-events. This includes, but not limited to, assists at EXE or MEM uop writeback like AVX* load/store/gather/scatter (non-FP GSSE-assist ) , assists generated by ROB like PEBS and RTIT, Uncore trap, RAR (Remote Action Request) and CET (Control flow Enforcement Technology) assists. the event also counts for Machine Ordering count.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "ASSISTS.PAGE_FAULT", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.PAGE_FAULT", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This event is deprecated. 
[This event is alias to MISC_RETIRED.LBR_INSERTS]", + "Counter": "0,1,2,3,4,5,6,7", + "Deprecated": "1", + "EventCode": "0xe4", + "EventName": "LBR_INSERTS.ANY", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x800000010000", + "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x400000010000", + "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts streaming stores that have any type of response.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xB7", + "EventName": "OCR.STREAMING_WR.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10800", + "PublicDescription": "Counts streaming stores that have any type of response. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts streaming stores that have any type of response.", + "Counter": "0,1,2,3", + "EventCode": "0x2A,0x2B", + "EventName": "OCR.STREAMING_WR.ANY_RESPONSE", + "MSRIndex": "0x1a6,0x1a7", + "MSRValue": "0x10800", + "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles the uncore cannot take further requests", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x2d", + "EventName": "XQ.FULL_CYCLES", + "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/pipeline.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/pipeline.json new file mode 100644 index 000000000000..09e1147c4733 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/pipeline.json @@ -0,0 +1,2096 @@ +[ + { + "BriefDescription": "Counts the number of cycles when any of the floating point or integer dividers are active.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xcd", + "EventName": "ARITH.DIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x3", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb0", + "EventName": "ARITH.DIV_ACTIVE", + "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. 
Accounts for integer and floating-point operations.", + "SampleAfterValue": "1000003", + "UMask": "0x9", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of cycles when any of the integer dividers are active.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xcd", + "EventName": "ARITH.IDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event counts the cycles the integer divider is busy.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb0", + "EventName": "ARITH.IDIV_ACTIVE", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of active integer dividers per cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcd", + "EventName": "ARITH.IDIV_OCCUPANCY", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of integer divider uops executed per cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcd", + "EventName": "ARITH.IDIV_UOPS", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc1", + "EventName": "ASSISTS.ANY", + "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. 
Examples include AD (page Access Dirty), FP and AVX related assists.", + "SampleAfterValue": "100003", + "UMask": "0x1b", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the total number of branch instructions retired for all branch types.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL012, MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.ALL_BRANCHES", + "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.", + "SampleAfterValue": "200003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "All branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.ALL_BRANCHES", + "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND", + "SampleAfterValue": "200003", + "UMask": "0x7e", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND", + "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x11", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Not taken branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND_NTAKEN", + "PublicDescription": "Counts not taken branch instructions retired. 
Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND_TAKEN", + "SampleAfterValue": "200003", + "UMask": "0xfe", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Taken conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.COND_TAKEN", + "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.FAR_BRANCH", + "SampleAfterValue": "200003", + "UMask": "0xbf", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Far branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.FAR_BRANCH", + "PublicDescription": "Counts far branch instructions retired. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT", + "SampleAfterValue": "200003", + "UMask": "0xeb", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Indirect near branch instructions retired (excluding returns)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT", + "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x80", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT_CALL", + "SampleAfterValue": "200003", + "UMask": "0xfb", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of near indirect JMP branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.INDIRECT_JMP", + "SampleAfterValue": "200003", + "UMask": "0xef", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event is deprecated. 
Refer to new event BR_INST_RETIRED.INDIRECT_CALL", + "Counter": "0,1,2,3,4,5,6,7", + "Deprecated": "1", + "Errata": "MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.IND_CALL", + "SampleAfterValue": "200003", + "UMask": "0xfb", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of near CALL branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL012, MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_CALL", + "SampleAfterValue": "200003", + "UMask": "0xf9", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Direct and indirect near call instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_CALL", + "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of near RET branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "SampleAfterValue": "200003", + "UMask": "0xf7", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Return instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "PublicDescription": "Counts return instructions retired. 
Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of near taken branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_TAKEN", + "SampleAfterValue": "200003", + "UMask": "0xc0", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Taken branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.NEAR_TAKEN", + "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of near relative CALL branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.REL_CALL", + "SampleAfterValue": "200003", + "UMask": "0xfd", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of near relative JMP branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.REL_JMP", + "SampleAfterValue": "200003", + "UMask": "0xdf", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of taken branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "Errata": "MTL013", + "EventCode": "0xc4", + "EventName": "BR_INST_RETIRED.TAKEN", + "SampleAfterValue": "200003", + "UMask": "0x80", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", + "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. 
Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.", + "SampleAfterValue": "200003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "All mispredicted branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", + "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "Unit": "cpu_core" + }, + { + "BriefDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST", + "PublicDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. 
Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x44", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND", + "SampleAfterValue": "200003", + "UMask": "0x7e", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Mispredicted conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND", + "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x11", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_COST", + "PublicDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x51", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_NTAKEN", + "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. 
Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST", + "PublicDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x50", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_TAKEN", + "SampleAfterValue": "200003", + "UMask": "0xfe", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "number of branch instructions retired that were mispredicted and taken.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_TAKEN", + "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. 
It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.COND_TAKEN_COST", + "PublicDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x41", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT", + "SampleAfterValue": "200003", + "UMask": "0xeb", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT", + "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0x80", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_CALL", + "SampleAfterValue": "200003", + "UMask": "0xfb", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Mispredicted indirect CALL retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_CALL", + "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. 
Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST", + "PublicDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x42", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_COST", + "PublicDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. 
Available PDIST counters: 0", + "SampleAfterValue": "100003", + "UMask": "0xc0", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of mispredicted near indirect JMP branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.INDIRECT_JMP", + "SampleAfterValue": "200003", + "UMask": "0xef", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", + "SampleAfterValue": "200003", + "UMask": "0x80", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", + "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST", + "PublicDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "400009", + "UMask": "0x60", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This event counts the number of mispredicted ret instructions retired. 
Non PEBS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.RET", + "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.RETURN", + "SampleAfterValue": "200003", + "UMask": "0xf7", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc5", + "EventName": "BR_MISP_RETIRED.RET_COST", + "PublicDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0", + "SampleAfterValue": "100007", + "UMask": "0x48", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.C01", + "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. 
This state can be entered via the TPAUSE or UMWAIT instructions.", + "SampleAfterValue": "2000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.C02", + "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.", + "SampleAfterValue": "2000003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.C0_WAIT", + "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.", + "SampleAfterValue": "2000003", + "UMask": "0x70", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. 
[This event is alias to CPU_CLK_UNHALTED.THREAD]", + "Counter": "Fixed counter 1", + "EventName": "CPU_CLK_UNHALTED.CORE", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.CORE_P", + "SampleAfterValue": "2000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED", + "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", + "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.", + "SampleAfterValue": "25003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "CPU_CLK_UNHALTED.PAUSE", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xec", + "EventName": "CPU_CLK_UNHALTED.PAUSE", + "SampleAfterValue": "2000003", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xec", + "EventName": 
"CPU_CLK_UNHALTED.PAUSE_INST", + "SampleAfterValue": "2000003", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED", + "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.", + "SampleAfterValue": "2000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles.", + "Counter": "Fixed counter 2", + "EventName": "CPU_CLK_UNHALTED.REF_TSC", + "SampleAfterValue": "2000003", + "UMask": "0x3", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Reference cycles when the core is not in halt state.", + "Counter": "Fixed counter 2", + "EventName": "CPU_CLK_UNHALTED.REF_TSC", + "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. 
The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.", + "SampleAfterValue": "2000003", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.REF_TSC_P", + "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Reference cycles when the core is not in halt state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.REF_TSC_P", + "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. 
It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]", + "Counter": "Fixed counter 1", + "EventName": "CPU_CLK_UNHALTED.THREAD", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Core cycles when the thread is not in halt state", + "Counter": "Fixed counter 1", + "EventName": "CPU_CLK_UNHALTED.THREAD", + "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. 
It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.THREAD_P", + "SampleAfterValue": "2000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Thread cycles when thread is not in halt state", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x3c", + "EventName": "CPU_CLK_UNHALTED.THREAD_P", + "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.", + "SampleAfterValue": "2000003", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "8", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles while memory subsystem has an outstanding load.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "16", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Execution stalls while L1 cache miss 
demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "12", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS", + "SampleAfterValue": "1000003", + "UMask": "0xc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.", + "Counter": "0,1,2,3", + "CounterMask": "5", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x5", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Total execution stalls.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "4", + "EventCode": "0xa3", + "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.1_PORTS_UTIL", + "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL", + "SampleAfterValue": "2000003", + "UMask": "0xc", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.2_PORTS_UTIL", + "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles total of 3 uops are 
executed on all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.3_PORTS_UTIL", + "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.4_PORTS_UTIL", + "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.", + "SampleAfterValue": "2000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "5", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS", + "SampleAfterValue": "2000003", + "UMask": "0x21", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "2", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.BOUND_ON_STORES", + "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa6", + "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS", + "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.", + "SampleAfterValue": "1000003", + "UMask": "0x80", + "Unit": "cpu_core" + }, + { + 
"BriefDescription": "Instruction decoders utilized in a cycle", + "Counter": "0,1,2,3", + "EventCode": "0x75", + "EventName": "INST_DECODED.DECODERS", + "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Fixed Counter: Counts the number of instructions retired.", + "Counter": "Fixed counter 0", + "EventName": "INST_RETIRED.ANY", + "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event", + "Counter": "Fixed counter 0", + "EventName": "INST_RETIRED.ANY", + "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of instructions retired", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.ANY_P", + "SampleAfterValue": "2000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of instructions retired. General Counter - architectural event", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.ANY_P", + "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. 
Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.", + "SampleAfterValue": "2000003", + "Unit": "cpu_core" + }, + { + "BriefDescription": "INST_RETIRED.MACRO_FUSED", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.MACRO_FUSED", + "SampleAfterValue": "2000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired NOP instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.NOP", + "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Precise instruction retired with PEBS precise-distribution", + "Counter": "Fixed counter 0", + "EventName": "INST_RETIRED.PREC_DIST", + "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Iterations of Repeat string retired instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc0", + "EventName": "INST_RETIRED.REP_ITERATION", + "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. 
Note the number of iterations is implementation-dependent.", + "SampleAfterValue": "2000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Clears speculative count", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xad", + "EventName": "INT_MISC.CLEARS_COUNT", + "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears", + "SampleAfterValue": "500009", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES", + "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.", + "SampleAfterValue": "500009", + "UMask": "0x80", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.RECOVERY_CYCLES", + "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.", + "SampleAfterValue": "500009", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES", + "MSRIndex": "0x3F7", + "MSRValue": "0x7", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "TMA slots where uops got dropped", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xad", + "EventName": "INT_MISC.UOP_DROPPING", + "PublicDescription": "Estimated number of Top-down Microarchitecture 
Analysis slots that got dropped due to non front-end reasons", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of uops executed on all Integer ports.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.ALL", + "SampleAfterValue": "1000003", + "UMask": "0xff", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on a load port.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.LD", + "PublicDescription": "Counts the number of uops executed on a load port. This event counts for integer uops even if the destination is FP/vector", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on integer port 0.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.P0", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on integer port 1.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.P1", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on integer port 2.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.P2", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on integer port 3.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.P3", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on integer port 0,1, 2, 3.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": 
"INT_UOPS_EXECUTED.PRIMARY", + "SampleAfterValue": "1000003", + "UMask": "0x78", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on a Store address port.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.STA", + "PublicDescription": "Counts the number of uops executed on a Store address port. This event counts integer uops even if the data source is FP/vector", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops executed on an integer store data and jump port.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb3", + "EventName": "INT_UOPS_EXECUTED.STD_JMP", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "INT_VEC_RETIRED.128BIT", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.128BIT", + "SampleAfterValue": "1000003", + "UMask": "0x13", + "Unit": "cpu_core" + }, + { + "BriefDescription": "INT_VEC_RETIRED.256BIT", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.256BIT", + "SampleAfterValue": "1000003", + "UMask": "0xac", + "Unit": "cpu_core" + }, + { + "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.ADD_128", + "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.", + "SampleAfterValue": "1000003", + "UMask": "0x3", + "Unit": "cpu_core" + }, + { + "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.ADD_256", + "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.", + "SampleAfterValue": "1000003", + "UMask": "0xc", + "Unit": "cpu_core" + }, + 
{ + "BriefDescription": "INT_VEC_RETIRED.MUL_256", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.MUL_256", + "SampleAfterValue": "1000003", + "UMask": "0x80", + "Unit": "cpu_core" + }, + { + "BriefDescription": "INT_VEC_RETIRED.SHUFFLES", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.SHUFFLES", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "INT_VEC_RETIRED.VNNI_128", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.VNNI_128", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "INT_VEC_RETIRED.VNNI_256", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe7", + "EventName": "INT_VEC_RETIRED.VNNI_256", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.ADDRESS_ALIAS", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "False dependencies in MOB due to partial compare on address.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.ADDRESS_ALIAS", + "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.DATA_UNKNOWN", + "SampleAfterValue": "1000003", + "UMask": "0x1", + 
"Unit": "cpu_atom" + }, + { + "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.NO_SR", + "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.", + "SampleAfterValue": "100003", + "UMask": "0x88", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.STORE_FORWARD", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.", + "Counter": "0,1,2,3", + "EventCode": "0x03", + "EventName": "LD_BLOCKS.STORE_FORWARD", + "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.", + "SampleAfterValue": "100003", + "UMask": "0x82", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.", + "Counter": "0,1,2,3", + "EventCode": "0x4c", + "EventName": "LOAD_HIT_PREFETCH.SWPF", + "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. 
So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.", + "SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xa8", + "EventName": "LSD.CYCLES_ACTIVE", + "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "6", + "EventCode": "0xa8", + "EventName": "LSD.CYCLES_OK", + "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of Uops delivered by the LSD.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa8", + "EventName": "LSD.UOPS", + "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.ANY", + "SampleAfterValue": "20003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of machine clears (nukes) of any type.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.COUNT", + "PublicDescription": "Counts the number of machine clears (nukes) of any type.", + 
"SampleAfterValue": "100003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.DISAMBIGUATION", + "SampleAfterValue": "20003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine without the use of microcode.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.FAST", + "SampleAfterValue": "20003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of virtual traps taken.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.FPC_VIRTUAL_TRAP", + "SampleAfterValue": "20003", + "UMask": "0x40", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machines clears due to memory renaming.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.MRN_NUKE", + "SampleAfterValue": "1000003", + "UMask": "0x80", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. 
A page fault occurs when either the page is not present, or an access violation occurs.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.PAGE_FAULT", + "SampleAfterValue": "20003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event is deprecated.", + "Counter": "0,1,2,3,4,5,6,7", + "Deprecated": "1", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SLOW", + "SampleAfterValue": "20003", + "UMask": "0x6f", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SMC", + "SampleAfterValue": "20003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Self-modifying code (SMC) detected.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc3", + "EventName": "MACHINE_CLEARS.SMC", + "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "LFENCE instructions retired", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe0", + "EventName": "MISC2_RETIRED.LFENCE", + "PublicDescription": "number of LFENCE retired instructions", + "SampleAfterValue": "400009", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of Last Branch Record (LBR) entries. Requires LBRs to be enabled and configured in IA32_LBR_CTL. 
[This event is alias to LBR_INSERTS.ANY]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe4", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Increments whenever there is an update to the LBR array.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xcc", + "EventName": "MISC_RETIRED.LBR_INSERTS", + "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.", + "SampleAfterValue": "100003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe0", + "EventName": "MISC_RETIRED1.CL_INST", + "SampleAfterValue": "1000003", + "UMask": "0xff", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of LFENCE instructions retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe0", + "EventName": "MISC_RETIRED1.LFENCE", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of accesses to KeyLocker cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe1", + "EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of misses to KeyLocker cache.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xe1", + "EventName": "MISC_RETIRED2.KEYLOCKER_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x11", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Cycles stalled due to no store buffers available. 
(not including draining form sync).", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa2", + "EventName": "RESOURCE_STALLS.SB", + "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.", + "SampleAfterValue": "100003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa2", + "EventName": "RESOURCE_STALLS.SCOREBOARD", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa5", + "EventName": "RS.EMPTY", + "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)", + "SampleAfterValue": "1000003", + "UMask": "0x7", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EdgeDetect": "1", + "EventCode": "0xa5", + "EventName": "RS.EMPTY_COUNT", + "Invert": "1", + "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. 
Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)", + "SampleAfterValue": "100003", + "UMask": "0x7", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa5", + "EventName": "RS.EMPTY_RESOURCE", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x75", + "EventName": "SERIALIZATION.C01_MS_SCB", + "SampleAfterValue": "200003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number issue slots not consumed due to a color request for an FCW or MXCSR control register when all 4 colors (copies) are already in use.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x75", + "EventName": "SERIALIZATION.COLOR_STALLS", + "SampleAfterValue": "200003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x75", + "EventName": "SERIALIZATION.NON_C01_MS_SCB", + "PublicDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires. 
The most commonly executed instruction with an MS scoreboard is PAUSE.", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa4", + "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS", + "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "SampleAfterValue": "10000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "TMA slots wasted due to incorrect speculations.", + "Counter": "0", + "EventCode": "0xa4", + "EventName": "TOPDOWN.BAD_SPEC_SLOTS", + "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.", + "SampleAfterValue": "10000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions", + "Counter": "0", + "EventCode": "0xa4", + "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS", + "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. 
This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.", + "SampleAfterValue": "10000003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa4", + "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS", + "SampleAfterValue": "10000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event", + "Counter": "Fixed counter 3", + "EventName": "TOPDOWN.SLOTS", + "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).", + "SampleAfterValue": "10000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xa4", + "EventName": "TOPDOWN.SLOTS_P", + "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. 
The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.", + "SampleAfterValue": "10000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.ALL", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.ALL_P", + "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. 
Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS", + "SampleAfterValue": "1000003", + "UMask": "0x3", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x73", + "EventName": "TOPDOWN_BAD_SPECULATION.NUKE", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to 
backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALL", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to due to certain allocation restrictions", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.ALL_P", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC and FPC RAT stalls - which can be due to the FIQ and IEC reservation station stall (integer, FP and SIMD scheduler not being able to accept another uop. )", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to mrbl stall. 
A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.REGISTER", + "SampleAfterValue": "1000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x74", + "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL_P]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ALL", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ALL_P", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear", + "Counter": 
"0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER", + "SampleAfterValue": "1000003", + "UMask": "0x40", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.CISC", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.DECODE", + "SampleAfterValue": "1000003", + "UMask": "0x8", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH", + "SampleAfterValue": "1000003", + "UMask": "0x8d", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY", + "SampleAfterValue": "1000003", + "UMask": "0x72", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "This event is deprecated. 
[This event is alias to TOPDOWN_FE_BOUND.ITLB_MISS]", + "Counter": "0,1,2,3,4,5,6,7", + "Deprecated": "1", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ITLB", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss [This event is alias to TOPDOWN_FE_BOUND.ITLB]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.ITLB_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.OTHER", + "SampleAfterValue": "1000003", + "UMask": "0x80", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x71", + "EventName": "TOPDOWN_FE_BOUND.PREDECODE", + "SampleAfterValue": "1000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of consumed retirement slots. [This event is alias to TOPDOWN_RETIRING.ALL_P]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x72", + "EventName": "TOPDOWN_RETIRING.ALL", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of consumed retirement slots. 
[This event is alias to TOPDOWN_RETIRING.ALL]", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x72", + "EventName": "TOPDOWN_RETIRING.ALL_P", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of non dec-by-all uops decoded by decoder", + "Counter": "0,1,2,3", + "EventCode": "0x76", + "EventName": "UOPS_DECODED.DEC0_UOPS", + "PublicDescription": "This event counts the number of not dec-by-all uops decoded by decoder 0.", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops executed on port 0", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_0", + "PublicDescription": "Number of uops dispatch to execution port 0.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops executed on port 1", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_1", + "PublicDescription": "Number of uops dispatch to execution port 1.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops executed on ports 2, 3 and 10", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_2_3_10", + "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops executed on ports 4 and 9", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_4_9", + "PublicDescription": "Number of uops dispatch to execution ports 4 and 9", + "SampleAfterValue": "2000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops executed on ports 5 and 11", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_5_11", + "PublicDescription": "Number of uops dispatch to execution 
ports 5 and 11", + "SampleAfterValue": "2000003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops executed on port 6", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_6", + "PublicDescription": "Number of uops dispatch to execution port 6.", + "SampleAfterValue": "2000003", + "UMask": "0x40", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Uops executed on ports 7 and 8", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb2", + "EventName": "UOPS_DISPATCHED.PORT_7_8", + "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.", + "SampleAfterValue": "2000003", + "UMask": "0x80", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Number of uops executed on the core.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE", + "PublicDescription": "Counts the number of uops executed from any thread.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1", + "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "2", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2", + "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + 
"CounterMask": "3", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3", + "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "4", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4", + "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where at least 1 uop was executed per-thread", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_1", + "PublicDescription": "Cycles where at least 1 uop was executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where at least 2 uops were executed per-thread", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "2", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_2", + "PublicDescription": "Cycles where at least 2 uops were executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where at least 3 uops were executed per-thread", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "3", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_3", + "PublicDescription": "Cycles where at least 3 uops were executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles where at least 4 uops were executed per-thread", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "4", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.CYCLES_GE_4", + 
"PublicDescription": "Cycles where at least 4 uops were executed per-thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.STALLS", + "Invert": "1", + "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.THREAD", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of x87 uops dispatched.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xb1", + "EventName": "UOPS_EXECUTED.X87", + "PublicDescription": "Counts the number of x87 uops executed.", + "SampleAfterValue": "2000003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of uops issued by the front end every cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x0e", + "EventName": "UOPS_ISSUED.ANY", + "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. 
If uop takes 2 ROB slots it counts as 2 uops_issued.", + "SampleAfterValue": "1000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Uops that RAT issues to RS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xae", + "EventName": "UOPS_ISSUED.ANY", + "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "UOPS_ISSUED.CYCLES", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xae", + "EventName": "UOPS_ISSUED.CYCLES", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the total number of uops retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.ALL", + "SampleAfterValue": "2000003", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Cycles with retired uop(s).", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.CYCLES", + "PublicDescription": "Counts cycles where at least one uop has retired.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Retired uops except the last uop of each instruction.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.HEAVY", + "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. 
An instruction that is decoded into less than two uops does not contribute to the count.", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of integer divide uops retired.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.IDIV", + "SampleAfterValue": "2000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.MS", + "SampleAfterValue": "2000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "UOPS_RETIRED.MS", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.MS", + "MSRIndex": "0x3F7", + "MSRValue": "0x8", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.SLOTS", + "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. 
Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles without actually retired uops.", + "Counter": "0,1,2,3,4,5,6,7", + "CounterMask": "1", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.STALLS", + "Invert": "1", + "PublicDescription": "This event counts cycles without actually retired uops.", + "SampleAfterValue": "1000003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0xc2", + "EventName": "UOPS_RETIRED.X87", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_atom" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-cache.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-cache.json new file mode 100644 index 000000000000..f294852dfbe6 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-cache.json @@ -0,0 +1,20 @@ +[ + { + "BriefDescription": "Number of all entries allocated. Includes also retries.", + "Counter": "0,1", + "EventCode": "0x35", + "EventName": "UNC_HAC_CBO_TOR_ALLOCATION.ALL", + "PerPkg": "1", + "UMask": "0x8", + "Unit": "HAC_CBO" + }, + { + "BriefDescription": "Asserted on coherent DRD + DRdPref allocations into the queue. 
Cacheable only", + "Counter": "0,1", + "EventCode": "0x35", + "EventName": "UNC_HAC_CBO_TOR_ALLOCATION.DRD", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "HAC_CBO" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-interconnect.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-interconnect.json new file mode 100644 index 000000000000..a2f4386a8379 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-interconnect.json @@ -0,0 +1,66 @@ +[ + { + "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.", + "Counter": "0", + "EventCode": "0x85", + "EventName": "UNC_ARB_DAT_OCCUPANCY.RD", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "ARB" + }, + { + "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.", + "Counter": "0,1", + "EventCode": "0x84", + "EventName": "UNC_HAC_ARB_COH_TRK_REQUESTS.ALL", + "Experimental": "1", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "HAC_ARB" + }, + { + "BriefDescription": "Number of all coherent Data Read entries. 
Doesn't include prefetches", + "Counter": "0,1", + "EventCode": "0x81", + "EventName": "UNC_HAC_ARB_REQ_TRK_REQUEST.DRD", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "HAC_ARB" + }, + { + "BriefDescription": "Number of all CMI transactions", + "Counter": "0,1", + "EventCode": "0x8A", + "EventName": "UNC_HAC_ARB_TRANSACTIONS.ALL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "HAC_ARB" + }, + { + "BriefDescription": "Number of all CMI reads", + "Counter": "0,1", + "EventCode": "0x8A", + "EventName": "UNC_HAC_ARB_TRANSACTIONS.READS", + "PerPkg": "1", + "UMask": "0x2", + "Unit": "HAC_ARB" + }, + { + "BriefDescription": "Number of all CMI writes not including Mflush", + "Counter": "0,1", + "EventCode": "0x8A", + "EventName": "UNC_HAC_ARB_TRANSACTIONS.WRITES", + "PerPkg": "1", + "UMask": "0x4", + "Unit": "HAC_ARB" + }, + { + "BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.", + "Counter": "0,1", + "EventCode": "0x81", + "EventName": "UNC_HAC_ARB_TRK_REQUESTS.ALL", + "PerPkg": "1", + "UMask": "0x1", + "Unit": "HAC_ARB" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-memory.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-memory.json new file mode 100644 index 000000000000..ceb8839f0767 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-memory.json @@ -0,0 +1,160 @@ +[ + { + "BriefDescription": "Counts every CAS read command sent from the Memory Controller 0 to DRAM (sum of all channels).", + "Counter": "0", + "EventCode": "0xff", + "EventName": "UNC_MC0_RDCAS_COUNT_FREERUN", + "PerPkg": "1", + "PublicDescription": "Counts every CAS read command sent from the Memory Controller 0 to DRAM (sum of all channels). 
Each CAS commands can be for 32B or 64B of data.", + "UMask": "0x20", + "Unit": "imc_free_running_0" + }, + { + "BriefDescription": "Counts every read and write request entering the Memory Controller 0.", + "Counter": "2", + "EventCode": "0xff", + "EventName": "UNC_MC0_TOTAL_REQCOUNT_FREERUN", + "PerPkg": "1", + "PublicDescription": "Counts every read and write request entering the Memory Controller 0 (sum of all channels). All requests are counted as one, whether they are 32B or 64B Read/Write or partial/full line writes. Some write requests to the same address may merge to a single write command to DRAM. Therefore, the total request count may be higher than total DRAM BW.", + "UMask": "0x10", + "Unit": "imc_free_running_0" + }, + { + "BriefDescription": "Counts every CAS write command sent from the Memory Controller 0 to DRAM (sum of all channels).", + "Counter": "1", + "EventCode": "0xff", + "EventName": "UNC_MC0_WRCAS_COUNT_FREERUN", + "PerPkg": "1", + "PublicDescription": "Counts every CAS write command sent from the Memory Controller 0 to DRAM (sum of all channels). Each CAS commands can be for 32B or 64B of data.", + "UMask": "0x30", + "Unit": "imc_free_running_0" + }, + { + "BriefDescription": "Counts every CAS read command sent from the Memory Controller 1 to DRAM (sum of all channels).", + "Counter": "3", + "EventCode": "0xff", + "EventName": "UNC_MC1_RDCAS_COUNT_FREERUN", + "PerPkg": "1", + "PublicDescription": "Counts every CAS read command sent from the Memory Controller 1 to DRAM (sum of all channels). Each CAS commands can be for 32B or 64B of data.", + "UMask": "0x20", + "Unit": "imc_free_running_1" + }, + { + "BriefDescription": "Counts every read and write request entering the Memory Controller 1.", + "Counter": "5", + "EventCode": "0xff", + "EventName": "UNC_MC1_TOTAL_REQCOUNT_FREERUN", + "PerPkg": "1", + "PublicDescription": "Counts every read and write request entering the Memory Controller 1 (sum of all channels). 
All requests are counted as one, whether they are 32B or 64B Read/Write or partial/full line writes. Some write requests to the same address may merge to a single write command to DRAM. Therefore, the total request count may be higher than total DRAM BW.", + "UMask": "0x10", + "Unit": "imc_free_running_1" + }, + { + "BriefDescription": "Counts every CAS write command sent from the Memory Controller 1 to DRAM (sum of all channels).", + "Counter": "4", + "EventCode": "0xff", + "EventName": "UNC_MC1_WRCAS_COUNT_FREERUN", + "PerPkg": "1", + "PublicDescription": "Counts every CAS write command sent from the Memory Controller 1 to DRAM (sum of all channels). Each CAS commands can be for 32B or 64B of data.", + "UMask": "0x30", + "Unit": "imc_free_running_1" + }, + { + "BriefDescription": "ACT command for a read request sent to DRAM", + "Counter": "0,1,2,3,4", + "EventCode": "0x24", + "EventName": "UNC_M_ACT_COUNT_RD", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "ACT command sent to DRAM", + "Counter": "0,1,2,3,4", + "EventCode": "0x26", + "EventName": "UNC_M_ACT_COUNT_TOTAL", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "ACT command for a write request sent to DRAM", + "Counter": "0,1,2,3,4", + "EventCode": "0x25", + "EventName": "UNC_M_ACT_COUNT_WR", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "Read CAS command sent to DRAM", + "Counter": "0,1,2,3,4", + "EventCode": "0x22", + "EventName": "UNC_M_CAS_COUNT_RD", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "Write CAS command sent to DRAM", + "Counter": "0,1,2,3,4", + "EventCode": "0x23", + "EventName": "UNC_M_CAS_COUNT_WR", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "Any Rank at Hot state", + "Counter": "0,1,2,3,4", + "EventCode": "0x19", + "EventName": "UNC_M_DRAM_THERMAL_HOT", + "Experimental": "1", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "Any Rank at Warm state", + "Counter": "0,1,2,3,4", + 
"EventCode": "0x1A", + "EventName": "UNC_M_DRAM_THERMAL_WARM", + "Experimental": "1", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration", + "Counter": "0,1,2,3,4", + "EventCode": "0x28", + "EventName": "UNC_M_PRE_COUNT_IDLE", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "PRE command sent to DRAM for a read/write request", + "Counter": "0,1,2,3,4", + "EventCode": "0x27", + "EventName": "UNC_M_PRE_COUNT_PAGE_MISS", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "Number of bytes read from DRAM, in 32B chunks. Counter increments by 1 after receiving 32B chunk data.", + "Counter": "0,1,2,3,4", + "EventCode": "0x3A", + "EventName": "UNC_M_RD_DATA", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "Total number of read and write byte transfers to/from DRAM, in 32B chunks. Counter increments by 1 after sending or receiving 32B chunk data.", + "Counter": "0,1,2,3,4", + "EventCode": "0x3C", + "EventName": "UNC_M_TOTAL_DATA", + "PerPkg": "1", + "Unit": "iMC" + }, + { + "BriefDescription": "Number of bytes written to DRAM, in 32B chunks. 
Counter increments by 1 after sending 32B chunk data.", + "Counter": "0,1,2,3,4", + "EventCode": "0x3B", + "EventName": "UNC_M_WR_DATA", + "PerPkg": "1", + "Unit": "iMC" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-other.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-other.json new file mode 100644 index 000000000000..b3f9c588b410 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/uncore-other.json @@ -0,0 +1,10 @@ +[ + { + "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.", + "Counter": "FIXED", + "EventCode": "0xff", + "EventName": "UNC_CLOCK.SOCKET", + "PerPkg": "1", + "Unit": "CNCU" + } +] diff --git a/lib/libpmc/pmu-events/arch/x86/meteorlake/virtual-memory.json b/lib/libpmc/pmu-events/arch/x86/meteorlake/virtual-memory.json new file mode 100644 index 000000000000..04396c7b3e08 --- /dev/null +++ b/lib/libpmc/pmu-events/arch/x86/meteorlake/virtual-memory.json @@ -0,0 +1,377 @@ +[ + { + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. 
Will result in a DTLB write from STLB.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.STLB_HIT", + "SampleAfterValue": "200003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Loads that miss the DTLB and hit the STLB.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.STLB_HIT", + "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).", + "SampleAfterValue": "100003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE", + "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED", + "SampleAfterValue": "200003", + "UMask": "0xe", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED", + "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. 
The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0xe", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Page walks completed due to a demand data load to a 1G page.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G", + "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. 
The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Page walks completed due to a demand data load to a 4K page.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x08", + "EventName": "DTLB_LOAD_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). 
Includes EPT-walk intervals.", + "SampleAfterValue": "200003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x12", + "EventName": "DTLB_LOAD_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all pages sizes. Will result in a DTLB write from STLB.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.STLB_HIT", + "SampleAfterValue": "2000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Stores that miss the DTLB and hit the STLB.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.STLB_HIT", + "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).", + "SampleAfterValue": "100003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE", + "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED", + "SampleAfterValue": "2000003", + "UMask": "0xe", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Store misses in all 
TLB levels causes a page walk that completes. (All page sizes)", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED", + "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0xe", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Page walks completed due to a demand data store to a 1G page.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G", + "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x8", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. 
The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Page walks completed due to a demand data store to a 4K page.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x49", + "EventName": "DTLB_STORE_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). 
Includes EPT-walk intervals.", + "SampleAfterValue": "200003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x13", + "EventName": "DTLB_STORE_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.MISS_CAUSED_WALK", + "SampleAfterValue": "1000003", + "UMask": "0x1", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. 
Will result in an ITLB write from STLB.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.STLB_HIT", + "SampleAfterValue": "2000003", + "UMask": "0x20", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.STLB_HIT", + "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).", + "SampleAfterValue": "100003", + "UMask": "0x20", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.", + "Counter": "0,1,2,3", + "CounterMask": "1", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_ACTIVE", + "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.", + "SampleAfterValue": "200003", + "UMask": "0xe", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_COMPLETED", + "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. 
The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0xe", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x4", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M", + "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x4", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.", + "SampleAfterValue": "2000003", + "UMask": "0x2", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. 
(4K)", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", + "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.", + "SampleAfterValue": "100003", + "UMask": "0x2", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x85", + "EventName": "ITLB_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.", + "SampleAfterValue": "200003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.", + "Counter": "0,1,2,3", + "EventCode": "0x11", + "EventName": "ITLB_MISSES.WALK_PENDING", + "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.", + "SampleAfterValue": "100003", + "UMask": "0x10", + "Unit": "cpu_core" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a DTLB miss", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": "LD_HEAD.DTLB_MISS", + "SampleAfterValue": "1000003", + "UMask": "0x10", + "Unit": "cpu_atom" + }, + { + "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.", + "Counter": "0,1,2,3,4,5,6,7", + "EventCode": "0x05", + "EventName": 
"LD_HEAD.DTLB_MISS_AT_RET", + "SampleAfterValue": "1000003", + "UMask": "0x90", + "Unit": "cpu_atom" + } +] diff --git a/lib/libpmc/pmu-events/jevents.c b/lib/libpmc/pmu-events/jevents.c index b129b3706832..facec3361c70 100644 --- a/lib/libpmc/pmu-events/jevents.c +++ b/lib/libpmc/pmu-events/jevents.c @@ -71,8 +71,11 @@ struct json_event { char *perpkg; char *aggr_mode; char *metric_expr; + char *metric_threshold; char *metric_name; char *metric_group; + char *metric_group_nogroup; + char *default_metric_group; char *deprecated; char *metric_constraint; }; @@ -379,10 +382,16 @@ static int print_events_table_entry(void *data, struct json_event *je) fprintf(outfp, "\t.aggr_mode = \"%d\",\n", convert(je->aggr_mode)); if (je->metric_expr) fprintf(outfp, "\t.metric_expr = \"%s\",\n", je->metric_expr); + if (je->metric_threshold) + fprintf(outfp, "\t.metric_threshold = \"%s\",\n", je->metric_threshold); if (je->metric_name) fprintf(outfp, "\t.metric_name = \"%s\",\n", je->metric_name); if (je->metric_group) fprintf(outfp, "\t.metric_group = \"%s\",\n", je->metric_group); + if (je->metric_group_nogroup) + fprintf(outfp, "\t.metric_group_nogroup = \"%s\",\n", je->metric_group_nogroup); + if (je->default_metric_group) + fprintf(outfp, "\t.default_metric_group = \"%s\",\n", je->default_metric_group); if (je->deprecated) fprintf(outfp, "\t.deprecated = \"%s\",\n", je->deprecated); if (je->metric_constraint) @@ -404,8 +413,11 @@ struct event_struct { char *perpkg; char *aggr_mode; char *metric_expr; + char *metric_threshold; char *metric_name; char *metric_group; + char *metric_group_nogroup; + char *default_metric_group; char *deprecated; char *metric_constraint; }; @@ -434,8 +446,11 @@ struct event_struct { op(perpkg); \ op(aggr_mode); \ op(metric_expr); \ + op(metric_threshold); \ op(metric_name); \ op(metric_group); \ + op(metric_group_nogroup); \ + op(default_metric_group); \ op(deprecated); \ } while (0) @@ -711,10 +726,16 @@ static int json_events(const char *fn, 
addfield(map, &je.metric_name, "", "", val); } else if (json_streq(map, field, "MetricGroup")) { addfield(map, &je.metric_group, "", "", val); + } else if (json_streq(map, field, "MetricgroupNoGroup")) { + addfield(map, &je.metric_group_nogroup, "", "", val); + } else if (json_streq(map, field, "DefaultMetricgroupName")) { + addfield(map, &je.default_metric_group, "", "", val); } else if (json_streq(map, field, "MetricConstraint")) { addfield(map, &je.metric_constraint, "", "", val); } else if (json_streq(map, field, "MetricExpr")) { addfield(map, &je.metric_expr, "", "", val); + } else if (json_streq(map, field, "MetricThreshold")) { + addfield(map, &je.metric_threshold, "", "", val); } else if (json_streq(map, field, "ArchStdEvent")) { addfield(map, &arch_std, "", "", val); for (s = arch_std; *s; s++) @@ -866,8 +887,11 @@ free_strings: free(je.deprecated); free(je.unit); free(je.metric_expr); + free(je.metric_threshold); free(je.metric_name); free(je.metric_group); + free(je.metric_group_nogroup); + free(je.default_metric_group); free(je.metric_constraint); free(arch_std); diff --git a/lib/libpmc/pmu-events/pmu-events.h b/lib/libpmc/pmu-events/pmu-events.h index d9edbfba7ceb..1a158a5f16f2 100644 --- a/lib/libpmc/pmu-events/pmu-events.h +++ b/lib/libpmc/pmu-events/pmu-events.h @@ -24,8 +24,11 @@ struct pmu_event { const char *perpkg; const char *aggr_mode; const char *metric_expr; + const char *metric_threshold; const char *metric_name; const char *metric_group; + const char *metric_group_nogroup; + const char *default_metric_group; const char *deprecated; const char *metric_constraint; }; diff --git a/lib/librpcsec_gss/svc_rpcsec_gss.c b/lib/librpcsec_gss/svc_rpcsec_gss.c index e9d39a813f86..73b92371e6d0 100644 --- a/lib/librpcsec_gss/svc_rpcsec_gss.c +++ b/lib/librpcsec_gss/svc_rpcsec_gss.c @@ -758,6 +758,14 @@ svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg, memset(rpchdr, 0, sizeof(rpchdr)); + oa = &msg->rm_call.cb_cred; + + if 
(oa->oa_length > sizeof(rpchdr) - 8 * BYTES_PER_XDR_UNIT) { + log_debug("auth length %d exceeds maximum", oa->oa_length); + client->cl_state = CLIENT_STALE; + return (FALSE); + } + /* Reconstruct RPC header for signing (from xdr_callmsg). */ buf = rpchdr; IXDR_PUT_LONG(buf, msg->rm_xid); @@ -766,7 +774,6 @@ svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg, IXDR_PUT_LONG(buf, msg->rm_call.cb_prog); IXDR_PUT_LONG(buf, msg->rm_call.cb_vers); IXDR_PUT_LONG(buf, msg->rm_call.cb_proc); - oa = &msg->rm_call.cb_cred; IXDR_PUT_ENUM(buf, oa->oa_flavor); IXDR_PUT_LONG(buf, oa->oa_length); if (oa->oa_length) { diff --git a/lib/libsys/dup.2 b/lib/libsys/dup.2 index 524fd7688670..9c1e3e8d648b 100644 --- a/lib/libsys/dup.2 +++ b/lib/libsys/dup.2 @@ -81,7 +81,10 @@ object reference to the file must be obtained by issuing an additional .Xr open 2 system call. -The close-on-exec flag on the new file descriptor is unset. +The close-on-exec and close-on-fork flags on the new file descriptor +are unset. +The resolve-beneath flag on the new file descriptor is set to the same +state as on the old file descriptor. .Pp In .Fn dup2 , diff --git a/lib/libsys/kldload.2 b/lib/libsys/kldload.2 index 63a13c328d58..55dd16300af0 100644 --- a/lib/libsys/kldload.2 +++ b/lib/libsys/kldload.2 @@ -23,7 +23,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd March 3, 1999 +.Dd March 26, 2026 .Dt KLDLOAD 2 .Os .Sh NAME @@ -39,7 +39,20 @@ The .Fn kldload system call -loads a kld file into the kernel using the kernel linker. +loads a kld file specified by +.Fa file +into the kernel using the kernel linker (see +.Xr kld 4 ) . +The +.Fa file +can be specified as a full or relative path, or otherwise is searched +within the module path as defined by the +loader tunable and sysctl variable +.Va kern.module_path . +The .ko +extension for +.Fa file +is not mandatory. 
.Sh RETURN VALUES The .Fn kldload diff --git a/lib/msun/aarch64/fenv.c b/lib/msun/aarch64/fenv.c index cce9f33e4f30..4c54656be7d3 100644 --- a/lib/msun/aarch64/fenv.c +++ b/lib/msun/aarch64/fenv.c @@ -38,7 +38,12 @@ const fenv_t __fe_dfl_env = 0; #error "This file must be compiled with C99 'inline' semantics" #endif -extern inline int feclearexcept(int __excepts); +int +(feclearexcept)(int excepts) +{ + return (__feclearexcept_int(excepts)); +} + extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts); extern inline int fesetexceptflag(const fexcept_t *__flagp, int __excepts); extern inline int feraiseexcept(int __excepts); diff --git a/lib/msun/aarch64/fenv.h b/lib/msun/aarch64/fenv.h index a435a9de5223..d125978b887b 100644 --- a/lib/msun/aarch64/fenv.h +++ b/lib/msun/aarch64/fenv.h @@ -81,8 +81,11 @@ extern const fenv_t __fe_dfl_env; #define __mrs_fpsr(__r) __asm __volatile("mrs %0, fpsr" : "=r" (__r)) #define __msr_fpsr(__r) __asm __volatile("msr fpsr, %0" : : "r" (__r)) -__fenv_static __inline int -feclearexcept(int __excepts) +int feclearexcept(int); +#define feclearexcept(a) __feclearexcept_int(a) + +__fenv_static inline int +__feclearexcept_int(int __excepts) { fexcept_t __r; diff --git a/lib/msun/amd64/fenv.c b/lib/msun/amd64/fenv.c index 4d271f8d456a..cd3b83d11585 100644 --- a/lib/msun/amd64/fenv.c +++ b/lib/msun/amd64/fenv.c @@ -46,7 +46,12 @@ const fenv_t __fe_dfl_env = { __INITIAL_MXCSR__ }; -extern inline int feclearexcept(int __excepts); +int +(feclearexcept)(int excepts) +{ + return (__feclearexcept_int(excepts)); +} + extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts); int diff --git a/lib/msun/arm/fenv.c b/lib/msun/arm/fenv.c index 05b3adb05f81..c8edf3fef037 100644 --- a/lib/msun/arm/fenv.c +++ b/lib/msun/arm/fenv.c @@ -70,7 +70,12 @@ const fenv_t __fe_dfl_env = 0; #error "This file must be compiled with C99 'inline' semantics" #endif -extern inline int feclearexcept(int __excepts); +int +(feclearexcept)(int excepts) 
+{ + return (__feclearexcept_int(excepts)); +} + extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts); extern inline int fesetexceptflag(const fexcept_t *__flagp, int __excepts); extern inline int feraiseexcept(int __excepts); diff --git a/lib/msun/arm/fenv.h b/lib/msun/arm/fenv.h index e8a30fcf496f..14638dd33aa7 100644 --- a/lib/msun/arm/fenv.h +++ b/lib/msun/arm/fenv.h @@ -111,8 +111,11 @@ int fegetexcept(void); #define _FPU_MASK_SHIFT 8 +int feclearexcept(int); +#define feclearexcept(a) __feclearexcept_int(a) + __fenv_static inline int -feclearexcept(int __excepts) +__feclearexcept_int(int __excepts) { fexcept_t __fpsr; diff --git a/lib/msun/i387/fenv.c b/lib/msun/i387/fenv.c index ebb4111a5fa6..e0485a3597f6 100644 --- a/lib/msun/i387/fenv.c +++ b/lib/msun/i387/fenv.c @@ -88,7 +88,12 @@ __test_sse(void) return (0); } -extern inline int feclearexcept(int __excepts); +int +(feclearexcept)(int excepts) +{ + return (__feclearexcept_int(excepts)); +} + extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts); int diff --git a/lib/msun/powerpc/fenv.c b/lib/msun/powerpc/fenv.c index cc0b518bb7c4..bcf78d5c096e 100644 --- a/lib/msun/powerpc/fenv.c +++ b/lib/msun/powerpc/fenv.c @@ -35,7 +35,12 @@ const fenv_t __fe_dfl_env = 0x00000000; -extern inline int feclearexcept(int __excepts); +int +(feclearexcept)(int excepts) +{ + return (__feclearexcept_int(excepts)); +} + extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts); extern inline int fesetexceptflag(const fexcept_t *__flagp, int __excepts); extern inline int feraiseexcept(int __excepts); diff --git a/lib/msun/powerpc/fenv.h b/lib/msun/powerpc/fenv.h index 09fd4caafb43..74a71ef39e5a 100644 --- a/lib/msun/powerpc/fenv.h +++ b/lib/msun/powerpc/fenv.h @@ -111,8 +111,11 @@ union __fpscr { } __bits; }; +int feclearexcept(int); +#define feclearexcept(a) __feclearexcept_int(a) + __fenv_static inline int -feclearexcept(int __excepts) +__feclearexcept_int(int __excepts) { union __fpscr 
__r; diff --git a/lib/msun/riscv/fenv.c b/lib/msun/riscv/fenv.c index a4dde02a6ddc..4d1b2cb2f611 100644 --- a/lib/msun/riscv/fenv.c +++ b/lib/msun/riscv/fenv.c @@ -37,7 +37,12 @@ */ const fenv_t __fe_dfl_env = 0; -extern inline int feclearexcept(int __excepts); +int +(feclearexcept)(int excepts) +{ + return (__feclearexcept_int(excepts)); +} + extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts); extern inline int fesetexceptflag(const fexcept_t *__flagp, int __excepts); extern inline int feraiseexcept(int __excepts); diff --git a/lib/msun/riscv/fenv.h b/lib/msun/riscv/fenv.h index fd50463d479e..4e8f81aa04c1 100644 --- a/lib/msun/riscv/fenv.h +++ b/lib/msun/riscv/fenv.h @@ -79,8 +79,11 @@ extern const fenv_t __fe_dfl_env; #define __rfs(__fcsr) __asm __volatile("csrr %0, fcsr" : "=r" (__fcsr)) #define __wfs(__fcsr) __asm __volatile("csrw fcsr, %0" :: "r" (__fcsr)) +int feclearexcept(int); +#define feclearexcept(a) __feclearexcept_int(a) + __fenv_static inline int -feclearexcept(int __excepts) +__feclearexcept_int(int __excepts) { __asm __volatile("csrc fflags, %0" :: "r"(__excepts)); diff --git a/lib/msun/x86/fenv.h b/lib/msun/x86/fenv.h index e558d0372368..b806222e5ef6 100644 --- a/lib/msun/x86/fenv.h +++ b/lib/msun/x86/fenv.h @@ -143,6 +143,9 @@ fegetexcept(void) #endif /* __BSD_VISIBLE */ +int feclearexcept(int); +#define feclearexcept(a) __feclearexcept_int(a) + #ifdef __i386__ /* After testing for SSE support once, we cache the result in __has_sse. 
*/ @@ -164,7 +167,7 @@ int __test_sse(void); } while (0) __fenv_static inline int -feclearexcept(int __excepts) +__feclearexcept_int(int __excepts) { fenv_t __env; __uint32_t __mxcsr; @@ -262,7 +265,7 @@ fesetenv(const fenv_t *__envp) #else /* __amd64__ */ __fenv_static inline int -feclearexcept(int __excepts) +__feclearexcept_int(int __excepts) { fenv_t __env; diff --git a/libexec/Makefile b/libexec/Makefile index bfcd55b255c7..a5e3ea655a9e 100644 --- a/libexec/Makefile +++ b/libexec/Makefile @@ -14,6 +14,7 @@ SUBDIR= ${_atf} \ ${_makewhatis.local} \ ${_mknetid} \ ${_phttpget} \ + ${_pkgserve} \ ${_pppoed} \ rc \ revnetgroup \ diff --git a/libexec/rtld-elf/rtld.c b/libexec/rtld-elf/rtld.c index 5e15ba996ec8..e0de6d2e2214 100644 --- a/libexec/rtld-elf/rtld.c +++ b/libexec/rtld-elf/rtld.c @@ -187,6 +187,10 @@ static char *origin_subst_one(Obj_Entry *, char *, const char *, const char *, static char *origin_subst(Obj_Entry *, const char *); static bool obj_resolve_origin(Obj_Entry *obj); static void preinit_main(void); +static void rtld_recalc_bind_not(const char *); +static void rtld_recalc_dangerous_ld_env(void); +static void rtld_recalc_debug(const char *); +static void rtld_recalc_path_rpath(const char *); static int rtld_verify_versions(const Objlist *); static int rtld_verify_object_versions(Obj_Entry *); static void object_add_name(Obj_Entry *, const char *); @@ -198,6 +202,17 @@ static uint32_t gnu_hash(const char *); static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *, const unsigned long); +struct ld_env_var_desc; +static void rtld_set_var_bind_not(struct ld_env_var_desc *lvd); +static void rtld_set_var_bind_now(struct ld_env_var_desc *lvd); +static void rtld_set_var_debug(struct ld_env_var_desc *lvd); +static void rtld_set_var_dynamic_weak(struct ld_env_var_desc *lvd); +static void rtld_set_var_libmap_disable(struct ld_env_var_desc *lvd); +static void rtld_set_var_library_path(struct ld_env_var_desc *lvd); +static void 
rtld_set_var_library_path_fds(struct ld_env_var_desc *lvd); +static void rtld_set_var_library_path_rpath(struct ld_env_var_desc *lvd); +static void rtld_set_var_loadfltr(struct ld_env_var_desc *lvd); + void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported; void _r_debug_postinit(struct link_map *) __noinline __exported; @@ -215,7 +230,6 @@ static bool dangerous_ld_env; /* True if environment variables have been used to affect the libraries loaded */ bool ld_bind_not; /* Disable PLT update */ static const char *ld_bind_now; /* Environment variable for immediate binding */ -static const char *ld_debug; /* Environment variable for debugging */ static bool ld_dynamic_weak = true; /* True if non-weak definition overrides weak definition */ static const char *ld_library_path; /* Environment variable for search path */ @@ -368,26 +382,35 @@ struct ld_env_var_desc { const char *val; const bool unsecure : 1; const bool can_update : 1; - const bool debug : 1; bool owned : 1; + void (*const on_update)(struct ld_env_var_desc *); }; #define LD_ENV_DESC(var, unsec, ...) 
\ [LD_##var] = { .n = #var, .unsecure = unsec, __VA_ARGS__ } static struct ld_env_var_desc ld_env_vars[] = { - LD_ENV_DESC(BIND_NOW, false), + LD_ENV_DESC(BIND_NOW, false, .can_update = true, + .on_update = rtld_set_var_bind_now), LD_ENV_DESC(PRELOAD, true), LD_ENV_DESC(LIBMAP, true), - LD_ENV_DESC(LIBRARY_PATH, true, .can_update = true), - LD_ENV_DESC(LIBRARY_PATH_FDS, true, .can_update = true), - LD_ENV_DESC(LIBMAP_DISABLE, true), - LD_ENV_DESC(BIND_NOT, true), - LD_ENV_DESC(DEBUG, true, .can_update = true, .debug = true), + LD_ENV_DESC(LIBRARY_PATH, true, .can_update = true, + .on_update = rtld_set_var_library_path), + LD_ENV_DESC(LIBRARY_PATH_FDS, true, .can_update = true, + .on_update = rtld_set_var_library_path_fds), + LD_ENV_DESC(LIBMAP_DISABLE, true, .can_update = true, + .on_update = rtld_set_var_libmap_disable), + LD_ENV_DESC(BIND_NOT, true, .can_update = true, + .on_update = rtld_set_var_bind_not), + LD_ENV_DESC(DEBUG, true, .can_update = true, + .on_update = rtld_set_var_debug), LD_ENV_DESC(ELF_HINTS_PATH, true), - LD_ENV_DESC(LOADFLTR, true), - LD_ENV_DESC(LIBRARY_PATH_RPATH, true, .can_update = true), + LD_ENV_DESC(LOADFLTR, true, .can_update = true, + .on_update = rtld_set_var_loadfltr), + LD_ENV_DESC(LIBRARY_PATH_RPATH, true, .can_update = true, + .on_update = rtld_set_var_library_path_rpath), LD_ENV_DESC(PRELOAD_FDS, true), - LD_ENV_DESC(DYNAMIC_WEAK, true, .can_update = true), + LD_ENV_DESC(DYNAMIC_WEAK, true, .can_update = true, + .on_update = rtld_set_var_dynamic_weak), LD_ENV_DESC(TRACE_LOADED_OBJECTS, false), LD_ENV_DESC(UTRACE, false, .can_update = true), LD_ENV_DESC(DUMP_REL_PRE, false, .can_update = true), @@ -516,7 +539,7 @@ _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp) struct stat st; Elf_Addr *argcp; char **argv, **env, **envp, *kexecpath; - const char *argv0, *binpath, *library_path_rpath, *static_tls_extra; + const char *argv0, *binpath, *static_tls_extra; struct ld_env_var_desc *lvd; caddr_t imgentry; char 
buf[MAXPATHLEN]; @@ -721,9 +744,8 @@ _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp) } } - ld_debug = ld_get_env_var(LD_DEBUG); - if (ld_bind_now == NULL) - ld_bind_not = ld_get_env_var(LD_BIND_NOT) != NULL; + rtld_recalc_debug(ld_get_env_var(LD_DEBUG)); + rtld_recalc_bind_not(ld_get_env_var(LD_BIND_NOT)); ld_dynamic_weak = ld_get_env_var(LD_DYNAMIC_WEAK) == NULL; libmap_disable = ld_get_env_var(LD_LIBMAP_DISABLE) != NULL; libmap_override = ld_get_env_var(LD_LIBMAP); @@ -733,31 +755,18 @@ _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp) ld_preload_fds = ld_get_env_var(LD_PRELOAD_FDS); ld_elf_hints_path = ld_get_env_var(LD_ELF_HINTS_PATH); ld_loadfltr = ld_get_env_var(LD_LOADFLTR) != NULL; - library_path_rpath = ld_get_env_var(LD_LIBRARY_PATH_RPATH); - if (library_path_rpath != NULL) { - if (library_path_rpath[0] == 'y' || - library_path_rpath[0] == 'Y' || - library_path_rpath[0] == '1') - ld_library_path_rpath = true; - else - ld_library_path_rpath = false; - } + rtld_recalc_path_rpath(ld_get_env_var(LD_LIBRARY_PATH_RPATH)); static_tls_extra = ld_get_env_var(LD_STATIC_TLS_EXTRA); if (static_tls_extra != NULL && static_tls_extra[0] != '\0') { sz = parse_integer(static_tls_extra); if (sz >= RTLD_STATIC_TLS_EXTRA && sz <= SIZE_T_MAX) ld_static_tls_extra = sz; } - dangerous_ld_env = libmap_disable || libmap_override != NULL || - ld_library_path != NULL || ld_preload != NULL || - ld_elf_hints_path != NULL || ld_loadfltr || !ld_dynamic_weak || - static_tls_extra != NULL; + rtld_recalc_dangerous_ld_env(); ld_tracing = ld_get_env_var(LD_TRACE_LOADED_OBJECTS); ld_utrace = ld_get_env_var(LD_UTRACE); set_ld_elf_hints_path(); - if (ld_debug != NULL && *ld_debug != '\0') - debug = 1; dbg("%s is initialized, base address = %p", __progname, (caddr_t)aux_info[AT_BASE]->a_un.a_ptr); dbg("RTLD dynamic = %p", obj_rtld.dynamic); @@ -6611,18 +6620,121 @@ rtld_get_var(const char *name) return (NULL); } +static void +rtld_recalc_dangerous_ld_env(void) +{ + 
/* + * Never reset dangerous_ld_env back to false if rtld was ever + * contaminated with it set to true. + */ + dangerous_ld_env |= libmap_disable || libmap_override != NULL || + ld_library_path != NULL || ld_preload != NULL || + ld_elf_hints_path != NULL || ld_loadfltr || !ld_dynamic_weak || + ld_get_env_var(LD_STATIC_TLS_EXTRA) != NULL; +} + +static void +rtld_recalc_debug(const char *ld_debug) +{ + if (ld_debug != NULL && *ld_debug != '\0') + debug = 1; +} + +static void +rtld_set_var_debug(struct ld_env_var_desc *lvd) +{ + rtld_recalc_debug(lvd->val); +} + +static void +rtld_set_var_library_path(struct ld_env_var_desc *lvd) +{ + ld_library_path = lvd->val; +} + +static void +rtld_set_var_library_path_fds(struct ld_env_var_desc *lvd) +{ + ld_library_dirs = lvd->val; +} + +static void +rtld_recalc_path_rpath(const char *library_path_rpath) +{ + if (library_path_rpath != NULL) { + if (library_path_rpath[0] == 'y' || + library_path_rpath[0] == 'Y' || + library_path_rpath[0] == '1') + ld_library_path_rpath = true; + else + ld_library_path_rpath = false; + } else { + ld_library_path_rpath = false; + } +} + +static void +rtld_set_var_library_path_rpath(struct ld_env_var_desc *lvd) +{ + rtld_recalc_path_rpath(lvd->val); +} + +static void +rtld_recalc_bind_not(const char *bind_not_val) +{ + if (ld_bind_now == NULL) + ld_bind_not = bind_not_val != NULL; +} + +static void +rtld_set_var_bind_now(struct ld_env_var_desc *lvd) +{ + ld_bind_now = lvd->val; + rtld_recalc_bind_not(ld_get_env_var(LD_BIND_NOT)); +} + +static void +rtld_set_var_bind_not(struct ld_env_var_desc *lvd) +{ + rtld_recalc_bind_not(lvd->val); +} + +static void +rtld_set_var_dynamic_weak(struct ld_env_var_desc *lvd) +{ + ld_dynamic_weak = lvd->val == NULL; +} + +static void +rtld_set_var_loadfltr(struct ld_env_var_desc *lvd) +{ + ld_loadfltr = lvd->val != NULL; +} + +static void +rtld_set_var_libmap_disable(struct ld_env_var_desc *lvd) +{ + libmap_disable = lvd->val != NULL; +} + int rtld_set_var(const char 
*name, const char *val) { + RtldLockState lockstate; struct ld_env_var_desc *lvd; u_int i; + int error; + error = ENOENT; + wlock_acquire(rtld_bind_lock, &lockstate); for (i = 0; i < nitems(ld_env_vars); i++) { lvd = &ld_env_vars[i]; if (strcmp(lvd->n, name) != 0) continue; - if (!lvd->can_update || (lvd->unsecure && !trust)) - return (EPERM); + if (!lvd->can_update || (lvd->unsecure && !trust)) { + error = EPERM; + break; + } if (lvd->owned) free(__DECONST(char *, lvd->val)); if (val != NULL) @@ -6630,11 +6742,15 @@ rtld_set_var(const char *name, const char *val) else lvd->val = NULL; lvd->owned = true; - if (lvd->debug) - debug = lvd->val != NULL && *lvd->val != '\0'; - return (0); + if (lvd->on_update != NULL) + lvd->on_update(lvd); + error = 0; + break; } - return (ENOENT); + if (error == 0) + rtld_recalc_dangerous_ld_env(); + lock_release(rtld_bind_lock, &lockstate); + return (error); } /* diff --git a/libexec/rtld-elf/tests/Makefile b/libexec/rtld-elf/tests/Makefile index c4b3baab4cb8..3c05b52b83bb 100644 --- a/libexec/rtld-elf/tests/Makefile +++ b/libexec/rtld-elf/tests/Makefile @@ -7,6 +7,7 @@ SUBDIR_DEPEND_target= libpythagoras ATF_TESTS_C= ld_library_pathfds ATF_TESTS_C+= ld_preload_fds +ATF_TESTS_C+= set_var_test .for t in ${ATF_TESTS_C} SRCS.$t= $t.c common.c diff --git a/libexec/rtld-elf/tests/set_var_test.c b/libexec/rtld-elf/tests/set_var_test.c new file mode 100644 index 000000000000..6279bd5ecb44 --- /dev/null +++ b/libexec/rtld-elf/tests/set_var_test.c @@ -0,0 +1,38 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2026 Alex S <iwtcex@gmail.com> + */ + +#include <atf-c.h> +#include <dlfcn.h> +#include <fcntl.h> +#include <link.h> +#include <stdio.h> + +ATF_TC_WITHOUT_HEAD(set_var_library_path_fds); +ATF_TC_BODY(set_var_library_path_fds, tc) +{ + void *handle; + char *pathfds; + int testdir; + + handle = dlopen("libpythagoras.so.0", RTLD_LAZY); + ATF_REQUIRE(handle == NULL); + + testdir = open(atf_tc_get_config_var(tc, 
"srcdir"), + O_RDONLY | O_DIRECTORY); + ATF_REQUIRE(testdir >= 0); + + ATF_REQUIRE(asprintf(&pathfds, "%d", testdir) > 0); + ATF_REQUIRE(rtld_set_var("LIBRARY_PATH_FDS", pathfds) == 0); + + handle = dlopen("libpythagoras.so.0", RTLD_LAZY); + ATF_REQUIRE(handle != NULL); +} + +ATF_TP_ADD_TCS(tp) +{ + ATF_TP_ADD_TC(tp, set_var_library_path_fds); + return atf_no_error(); +} diff --git a/release/packages/ucl/caroot.ucl b/release/packages/ucl/caroot.ucl index f7d0dd8acb7f..72c1105248c0 100644 --- a/release/packages/ucl/caroot.ucl +++ b/release/packages/ucl/caroot.ucl @@ -5,5 +5,5 @@ deps { } scripts: { post-install = "/usr/sbin/certctl -D${PKG_ROOTDIR}/ ${PKG_METALOG:+-U -M $PKG_METALOG} rehash" - post-uninstall = "/usr/sbin/certctl -D${PKG_ROOTDIR}/ ${PKG_METALOG:+-U -M $PKG_METALOG} rehash" + post-deinstall = "/usr/sbin/certctl -D${PKG_ROOTDIR}/ ${PKG_METALOG:+-U -M $PKG_METALOG} rehash" } diff --git a/release/packages/ucl/ftpd-all.ucl b/release/packages/ucl/ftpd-all.ucl deleted file mode 100644 index cbaa078123d5..000000000000 --- a/release/packages/ucl/ftpd-all.ucl +++ /dev/null @@ -1,4 +0,0 @@ -comment = "FTP Daemon" -desc = <<EOD -FTP Daemon -EOD diff --git a/release/packages/ucl/libsdp-all.ucl b/release/packages/ucl/libsdp-all.ucl deleted file mode 100644 index e4f848c3281c..000000000000 --- a/release/packages/ucl/libsdp-all.ucl +++ /dev/null @@ -1,28 +0,0 @@ -/* - * SPDX-License-Identifier: ISC - * - * Copyright (c) 2025 Lexi Winter <ivy@FreeBSD.org> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -comment = "Bluetooth Service Discovery Protocol library" - -desc = <<EOD -libsdp allows applications to interact with the Bluetooth Service Discovery -Protocol. -EOD - -annotations { - set = "optional,optional-jail" -} diff --git a/release/packages/ucl/rc.ucl b/release/packages/ucl/rc.ucl index 854963a81ca1..b750c731469b 100644 --- a/release/packages/ucl/rc.ucl +++ b/release/packages/ucl/rc.ucl @@ -19,6 +19,9 @@ deps { "runtime": { version = "${VERSION}" - } + }, + # /etc/rc.d/var_run depends on mtree. + "mtree": { + version = "${VERSION}" + }, } - diff --git a/release/packages/ucl/xz-all.ucl b/release/packages/ucl/xz-all.ucl index 8a7c33bcce46..3523dc70ca55 100644 --- a/release/packages/ucl/xz-all.ucl +++ b/release/packages/ucl/xz-all.ucl @@ -25,5 +25,5 @@ applications to use this functionality programmatically. EOD annotations { - set = "optional,optional-jail" + set = "minimal,minimal-jail" } diff --git a/release/release.sh b/release/release.sh index 480d6b34f191..48c505470524 100755 --- a/release/release.sh +++ b/release/release.sh @@ -40,10 +40,27 @@ export PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin" VERSION=3 # Prototypes that can be redefined per-chroot or per-target. + +# load_chroot_env(): Set up the build environment needed. +# +# Done as part of chroot_env(). load_chroot_env() { } + +# load_target_env(): set up the build environment needed for the +# chroot_build_target() and `${chroot_build_release}` steps. load_target_env() { } + +# buildenv_setup(): set up the build environment needed for post-chroot_setup() buildenv_setup() { } +# chroot_cleanup(): Clean up resources setup in chroot_setup() at exit. 
+# +# This function can be built upon. `_chroot_cleanup` must be added to the end of +# the override function, if overridden. +chroot_cleanup() { + _chroot_cleanup +} # chroot_cleanup() + usage() { echo "Usage: $0 [-c release.conf]" exit 1 @@ -427,6 +444,18 @@ chroot_arm_build_release() { return 0 } # chroot_arm_build_release() +# chroot_cleanup(): Clean up resources setup in chroot_setup() at exit. +# +# This contains steps which must be executed at exit. +# +# Do not override this function: override `chroot_cleanup instead. +_chroot_cleanup() { + if [ -c "${CHROOTDIR}/dev/null" ]; then + echo "Unmounting /dev in ${CHROOTDIR}" + umount -f "${CHROOTDIR}/dev" + fi +} + # main(): Start here. main() { set -e # Everything must succeed @@ -451,7 +480,7 @@ main() { fi fi env_check - trap "umount ${CHROOTDIR}/dev" EXIT # Clean up devfs mount on exit + trap chroot_cleanup INT EXIT TERM chroot_setup extra_chroot_setup chroot_build_target diff --git a/release/tools/ec2-builder.conf b/release/tools/ec2-builder.conf index 3b0344f9eb9a..a272ea49a426 100644 --- a/release/tools/ec2-builder.conf +++ b/release/tools/ec2-builder.conf @@ -17,6 +17,7 @@ vm_extra_filter_base_packages() { -e '.*-dbg$' \ -e '.*-lib32$' \ -e '^FreeBSD-set-tests' + echo FreeBSD-clibs-lib32 } # Packages to install into the image we're creating. In addition to packages diff --git a/release/tools/ec2-small.conf b/release/tools/ec2-small.conf index 6564a59c2cf6..c1a05f98356f 100644 --- a/release/tools/ec2-small.conf +++ b/release/tools/ec2-small.conf @@ -20,6 +20,7 @@ vm_extra_filter_base_packages() { -e '.*-dbg$' \ -e '.*-lib32$' \ -e '^FreeBSD-set-tests' + echo FreeBSD-clibs-lib32 } # Packages to install into the image we're creating. 
In addition to packages diff --git a/release/tools/ec2.conf b/release/tools/ec2.conf index 4e1260903e06..dc1818219816 100644 --- a/release/tools/ec2.conf +++ b/release/tools/ec2.conf @@ -21,10 +21,6 @@ export VMSIZE=8000m export NOSWAP=YES ec2_common() { - # Delete the pkg package and the repo database; they will likely be - # long out of date before the EC2 instance is launched. In - # unprivileged builds this is unnecessary as pkg will not be - # installed to begin with. if [ -z "${NO_ROOT}" ]; then echo "ERROR: NO_ROOT not set" >&2 exit 1 diff --git a/release/tools/vmimage.subr b/release/tools/vmimage.subr index 97bf52205c93..56acbc359936 100644 --- a/release/tools/vmimage.subr +++ b/release/tools/vmimage.subr @@ -213,13 +213,6 @@ vm_extra_install_packages() { -r ${DESTDIR} \ install -y -r ${PKG_REPO_NAME} $pkg done - INSTALL_AS_USER=yes \ - ${PKG_CMD} \ - -o ABI=${PKG_ABI} \ - -o REPOS_DIR=${PKG_REPOS_DIR} \ - -o PKG_DBDIR=${DESTDIR}/var/db/pkg \ - -r ${DESTDIR} \ - autoremove -y if [ -n "${NOPKGBASE}" ]; then metalog_add_data ./var/db/pkg/local.sqlite fi diff --git a/sbin/dmesg/dmesg.8 b/sbin/dmesg/dmesg.8 index f9b9fce82ffc..d84587b61475 100644 --- a/sbin/dmesg/dmesg.8 +++ b/sbin/dmesg/dmesg.8 @@ -25,12 +25,12 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd May 7, 2022 +.Dd March 7, 2026 .Dt DMESG 8 .Os .Sh NAME .Nm dmesg -.Nd "display the system message buffer" +.Nd display the kernel message buffer .Sh SYNOPSIS .Nm .Op Fl ac @@ -38,7 +38,7 @@ .Sh DESCRIPTION The .Nm -utility displays the contents of the system message buffer. +utility displays the contents of the kernel message buffer. 
If the .Fl M option is not specified, the buffer is read from the currently running kernel diff --git a/sbin/pfctl/pfctl.c b/sbin/pfctl/pfctl.c index 87343f762842..a7bba4055b06 100644 --- a/sbin/pfctl/pfctl.c +++ b/sbin/pfctl/pfctl.c @@ -2513,8 +2513,9 @@ pfctl_load_ruleset(struct pfctl *pf, char *path, struct pfctl_ruleset *rs, printf("\n"); } - if (pf->optimize && rs_num == PF_RULESET_FILTER) - pfctl_optimize_ruleset(pf, rs); + if (pf->optimize && rs_num == PF_RULESET_FILTER && + (error = pfctl_optimize_ruleset(pf, rs)) != 0) + goto error; while ((r = TAILQ_FIRST(rs->rules[rs_num].active.ptr)) != NULL) { TAILQ_REMOVE(rs->rules[rs_num].active.ptr, r, entries); @@ -2614,13 +2615,13 @@ pfctl_load_rule(struct pfctl *pf, char *path, struct pfctl_rule *r, int depth) } } - if (pf->opts & PF_OPT_VERBOSE) { + if (pf->opts & PF_OPT_VERBOSE || was_present) { INDENT(depth, !(pf->opts & PF_OPT_VERBOSE2)); print_rule(r, name, pf->opts & PF_OPT_VERBOSE2, pf->opts & PF_OPT_NUMERIC); if (was_present) - printf(" -- rule was already present"); + printf(" -- rule was already present\n"); } path[len] = '\0'; pfctl_clear_pool(&r->rdr); diff --git a/sbin/pfctl/tests/files/pf1079.in b/sbin/pfctl/tests/files/pf1079.in new file mode 100644 index 000000000000..78a3f148f815 --- /dev/null +++ b/sbin/pfctl/tests/files/pf1079.in @@ -0,0 +1,2 @@ +pass in on lo0 proto tcp from any to 1.2.3.4/32 port 2222 rdr-to 10.0.0.10 nat-to 10.0.0.2 port 22 + diff --git a/sbin/pfctl/tests/files/pf1079.ok b/sbin/pfctl/tests/files/pf1079.ok new file mode 100644 index 000000000000..437e881855a1 --- /dev/null +++ b/sbin/pfctl/tests/files/pf1079.ok @@ -0,0 +1 @@ +pass in on lo0 inet proto tcp from any to 1.2.3.4 port = 2222 flags S/SA keep state nat-to 10.0.0.2 port 22 rdr-to 10.0.0.10 diff --git a/sbin/pfctl/tests/pfctl_test_list.inc b/sbin/pfctl/tests/pfctl_test_list.inc index ff51af7562d1..a2c64dfc0020 100644 --- a/sbin/pfctl/tests/pfctl_test_list.inc +++ b/sbin/pfctl/tests/pfctl_test_list.inc @@ -187,3 +187,4 
@@ PFCTL_TEST(1075, "One shot rule") PFCTL_TEST(1076, "State limiter") PFCTL_TEST(1077, "Source limiter") PFCTL_TEST(1078, "New page") +PFCTL_TEST(1079, "rdr-to and nat-to") diff --git a/sbin/tunefs/tunefs.8 b/sbin/tunefs/tunefs.8 index 0fb11041d97d..8c2d0bbf5d1d 100644 --- a/sbin/tunefs/tunefs.8 +++ b/sbin/tunefs/tunefs.8 @@ -25,7 +25,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd November 17, 2023 +.Dd March 20, 2026 .Dt TUNEFS 8 .Os .Sh NAME @@ -92,6 +92,8 @@ this parameter should be set higher. Specify the expected average file size. .It Fl j Cm enable | disable Turn on/off soft updates journaling. +Enabling soft updates journaling implies enabling soft updates and is +incompatible with GEOM journaling. .Pp Enabling journaling reduces the time spent by .Xr fsck_ffs 8 @@ -128,7 +130,8 @@ is running. Running a full fsck on a UFS filesystem is the equivalent of running a scrub on a ZFS filesystem. .It Fl J Cm enable | disable -Turn on/off gjournal flag. +Turn on/off GEOM journaling. +GEOM journaling is incompatible with soft updates. .It Fl k Ar held-for-metadata-blocks Set the amount of space to be held for metadata blocks. When set, the file system preference routines will try to save @@ -171,6 +174,7 @@ been deleted to get under the higher threshold. Turn on/off the administrative NFSv4 ACL enable flag. .It Fl n Cm enable | disable Turn on/off soft updates. +Soft updates are incompatible with GEOM journaling. 
.It Fl o Cm space | time The file system can either try to minimize the time spent allocating blocks, or it can attempt to minimize the space diff --git a/sbin/tunefs/tunefs.c b/sbin/tunefs/tunefs.c index 6f6df8446cd9..d5ef366e8221 100644 --- a/sbin/tunefs/tunefs.c +++ b/sbin/tunefs/tunefs.c @@ -358,6 +358,9 @@ main(int argc, char *argv[]) if ((sblock.fs_flags & (FS_DOSOFTDEP | FS_SUJ)) == (FS_DOSOFTDEP | FS_SUJ)) { warnx("%s remains unchanged as enabled", name); + } else if (sblock.fs_flags & FS_GJOURNAL) { + warnx("%s cannot be enabled while GEOM " + "journaling is enabled", name); } else if (sblock.fs_clean == 0) { warnx("%s cannot be enabled until fsck is run", name); @@ -386,6 +389,9 @@ main(int argc, char *argv[]) if (strcmp(Jvalue, "enable") == 0) { if (sblock.fs_flags & FS_GJOURNAL) { warnx("%s remains unchanged as enabled", name); + } if (sblock.fs_flags & FS_DOSOFTDEP) { + warnx("%s cannot be enabled while soft " + "updates are enabled", name); } else { sblock.fs_flags |= FS_GJOURNAL; warnx("%s set", name); @@ -403,9 +409,9 @@ main(int argc, char *argv[]) } if (kflag) { name = "space to hold for metadata blocks"; - if (sblock.fs_metaspace == kvalue) + if (sblock.fs_metaspace == kvalue) { warnx("%s remains unchanged as %d", name, kvalue); - else { + } else { kvalue = blknum(&sblock, kvalue); if (kvalue > sblock.fs_fpg / 2) { kvalue = blknum(&sblock, sblock.fs_fpg / 2); @@ -477,9 +483,12 @@ main(int argc, char *argv[]) if (nflag) { name = "soft updates"; if (strcmp(nvalue, "enable") == 0) { - if (sblock.fs_flags & FS_DOSOFTDEP) + if (sblock.fs_flags & FS_DOSOFTDEP) { warnx("%s remains unchanged as enabled", name); - else if (sblock.fs_clean == 0) { + } else if (sblock.fs_flags & FS_GJOURNAL) { + warnx("%s cannot be enabled while GEOM " + "journaling is enabled", name); + } else if (sblock.fs_clean == 0) { warnx("%s cannot be enabled until fsck is run", name); } else { diff --git a/secure/lib/libcrypto/Makefile b/secure/lib/libcrypto/Makefile index 
738de3479987..9d484e9d480c 100644 --- a/secure/lib/libcrypto/Makefile +++ b/secure/lib/libcrypto/Makefile @@ -687,12 +687,6 @@ SUBDIR.${MK_TESTS}= tests .include <bsd.lib.mk> -.if ${MACHINE} == "powerpc" -# Work around "relocation R_PPC_GOT16 out of range" errors -PICFLAG= -fPIC -.endif -PICFLAG+= -DOPENSSL_PIC - .if defined(ASM_${MACHINE_CPUARCH}) .PATH: ${SRCTOP}/sys/crypto/openssl/${MACHINE_CPUARCH} .if defined(ASM_amd64) diff --git a/secure/lib/libcrypto/Makefile.inc b/secure/lib/libcrypto/Makefile.inc index 73c650d590ff..8f22d501e005 100644 --- a/secure/lib/libcrypto/Makefile.inc +++ b/secure/lib/libcrypto/Makefile.inc @@ -12,6 +12,9 @@ CFLAGS+= -I${LCRYPTO_SRC}/providers/common/include CFLAGS+= -I${LCRYPTO_SRC}/providers/fips/include CFLAGS+= -I${LCRYPTO_SRC}/providers/implementations/include +PICFLAG= -fPIC +PICFLAG+= -DOPENSSL_PIC + .SUFFIXES: .pc .PATH.pc: ${LCRYPTO_SRC}/exporters diff --git a/share/man/man4/asmc.4 b/share/man/man4/asmc.4 index 4bc2a040bbce..9e3550661797 100644 --- a/share/man/man4/asmc.4 +++ b/share/man/man4/asmc.4 @@ -23,7 +23,7 @@ .\" ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" -.Dd April 2, 2019 +.Dd March 29, 2026 .Dt ASMC 4 .Os .Sh NAME @@ -79,7 +79,9 @@ On systems, you can control the keyboard brightness by writing a value to the .Va dev.asmc.%d.light.control -sysctl MIB. +sysctl MIB or with +.Xr backlight 8 +utility. .Pp The following sysctl MIBs contains the raw value returned by the left and right light sensors: @@ -143,8 +145,15 @@ dramatically reduce your hard drive's life span. Do not rely solely on the SMS to protect your hard drive: good care and common sense can increase your hard drive's life. +.Sh FILES +.Bl -tag -width ".Pa /dev/backlight/asmc0" -compact +.It Pa /dev/backlight/asmc0 +Keyboard +.Xr backlight 8 +device node. 
.Sh SEE ALSO .Xr ataidle 8 Pq Pa ports/sysutils/ataidle , +.Xr backlight 8 , .Xr devd 8 , .Xr sysctl 8 .Sh HISTORY diff --git a/share/man/man4/hwpmc.4 b/share/man/man4/hwpmc.4 index 1ab690e5009c..5b3e9a195b8c 100644 --- a/share/man/man4/hwpmc.4 +++ b/share/man/man4/hwpmc.4 @@ -445,7 +445,8 @@ The default is 16. The size in kilobytes of each log buffer used by .Nm Ns 's logging function. -The default buffer size is 4KB. +The default buffer size is 256KB. +The maximum value is 16MB. .It Va kern.hwpmc.mincount Pq integer, read-write The minimum sampling rate for sampling mode PMCs. The default count is 1000 events. @@ -453,10 +454,15 @@ The default count is 1000 events. The size of the spin mutex pool used by the PMC driver. The default is 32. .It Va kern.hwpmc.nbuffers_pcpu Pq integer, read-only -The number of log buffers used by +The number of log buffers per CPU used by .Nm for logging. -The default is 64. +The default is 32. +The product of +.Va kern.hwpmc.nbuffers_pcpu +and +.Va kern.hwpmc.logbuffersize +must not exceed 32MB per CPU. .It Va kern.hwpmc.nsamples Pq integer, read-only The number of entries in the per-CPU ring buffer used during sampling. The default is 512. @@ -522,16 +528,23 @@ was not configured with the required configuration option .It "hwpmc: tunable hashsize=%d must be greater than zero." A negative value was supplied for tunable .Va kern.hwpmc.hashsize . -.It "hwpmc: tunable logbuffersize=%d must be greater than zero." +.It "hwpmc: logbuffersize=%d must be greater than zero and less than or equal to %d, resetting to %d." A negative value was supplied for tunable .Va kern.hwpmc.logbuffersize . -.It "hwpmc: tunable nlogbuffers=%d must be greater than zero." +.It "hwpmc: nbuffers_pcpu=%d must be greater than zero, resetting to %d." A negative value was supplied for tunable -.Va kern.hwpmc.nlogbuffers . +.Va kern.hwpmc.nbuffers_pcpu . .It "hwpmc: tunable nsamples=%d out of range." 
The value for tunable .Va kern.hwpmc.nsamples was negative or greater than 65535. +.It "hwpmc: nbuffers_pcpu=%d * logbuffersize=%d exceeds %dMB per CPU limit, resetting to defaults (%d * %d)." +The product of tunables +.Va kern.hwpmc.nbuffers_pcpu +and +.Va kern.hwpmc.logbuffersize +exceeds the maximum per-CPU memory limit. +Both tunables are reset to their compiled defaults. .El .Sh DEBUGGING The diff --git a/share/man/man4/rge.4 b/share/man/man4/rge.4 index a8266c439b83..2b781e287e3c 100644 --- a/share/man/man4/rge.4 +++ b/share/man/man4/rge.4 @@ -46,6 +46,11 @@ capable of TCP large send (TCP segmentation offload). .Pp The RTL8125, RTL8126 and RTL8127 devices are single-chip solutions combining both a MAC and PHY. +The +.Nm +driver manages the PHY directly rather than using the +.Xr miibus 4 +interface. Standalone cards are available in 1x PCIe models. .Pp The RTL8125, RTL8126 and RTL8127 also support jumbo frames, which can be @@ -121,8 +126,18 @@ For more information on configuring this device, see .Sh HARDWARE The .Nm -driver supports RealTek RTL8125, RTL8126, RTL8127 and Killer E3000 based -PCIe 1GB to 1GB Ethernet devices. +driver supports the following PCIe Ethernet adapters: +.Pp +.Bl -bullet -compact +.It +RealTek RTL8125 (up to 2.5 Gbps) +.It +RealTek RTL8126 (up to 5 Gbps) +.It +RealTek RTL8127 (up to 10 Gbps) +.It +Killer E3000 (up to 2.5 Gbps) +.El .Sh SYSCTL VARIABLES The following variables are available as both .Xr sysctl 8 @@ -132,6 +147,11 @@ tunables: .Bl -tag -width "xxxxxx" .It Va dev.rge.%d.debug Configure runtime debug output. This is a 32 bit bitmask. +.It Va dev.rge.%d.rx_process_limit +Maximum number of RX packets to process per interrupt. +The default value is 16. +Increasing this value may improve throughput on high-speed links at the +cost of increased interrupt latency. 
.El .Sh DIAGNOSTICS .Bl -diag diff --git a/share/man/man4/ufshci.4 b/share/man/man4/ufshci.4 index d722c9902b98..f9de9b39a449 100644 --- a/share/man/man4/ufshci.4 +++ b/share/man/man4/ufshci.4 @@ -124,9 +124,41 @@ nodes are currently implemented: (R) Host controller minor version. .It Va dev.ufshci.0.major_version (R) Host controller major version. +.It Va dev.ufshci.0.wb_enabled +(R) WriteBooster enable/disable. +.It Va dev.ufshci.0.wb_flush_enabled +(R) WriteBooster flush enable/disable. +.It Va dev.ufshci.0.wb_buffer_type +(R) WriteBooster type. +.It Va dev.ufshci.0.wb_buffer_size_mb +(R) WriteBooster buffer size in MB. +.It Va dev.ufshci.0.wb_user_space_config_option +(R) WriteBooster preserve user space mode. +.It Va dev.ufshci.0.auto_hibernation_supported +(R) Device auto hibernation support. +.It Va dev.ufshci.0.auto_hibernate_idle_timer_value +(R) Auto-Hibernate Idle Timer Value (in microseconds). +.It Va dev.ufshci.0.power_mode_supported +(R) Device power mode support. +.It Va dev.ufshci.0.power_mode +(R) Current device power mode. +.It Va dev.ufshci.0.tx_rx_power_mode +(R) Current TX/RX PA_PWRMode value. +.It Va dev.ufshci.0.max_tx_lanes +(R) Maximum available TX data lanes. +.It Va dev.ufshci.0.max_rx_lanes +(R) Maximum available RX data lanes. +.It Va dev.ufshci.0.tx_lanes +(R) Active TX data lanes. +.It Va dev.ufshci.0.rx_lanes +(R) Active RX data lanes. +.It Va dev.ufshci.0.max_rx_hs_gear +(R) Maximum available RX HS gear. +.It Va dev.ufshci.0.hs_gear +(R) Active HS gear. .It Va dev.ufshci.0.utmrq.num_failures (R) Number of failed UTP task-management requests. -.It Va dev.ufshci.0.utmrq.ioq.num_retries +.It Va dev.ufshci.0.utmrq.num_retries (R) Number of retried UTP task-management requests. .It Va dev.ufshci.0.utmrq.num_intr_handler_calls (R) Number of interrupt handler calls caused by UTP task-management requests. 
diff --git a/share/man/man5/src.conf.5 b/share/man/man5/src.conf.5 index ad503a132a2c..33ce061a1114 100644 --- a/share/man/man5/src.conf.5 +++ b/share/man/man5/src.conf.5 @@ -1,5 +1,5 @@ .\" DO NOT EDIT-- this file is @generated by tools/build/options/makeman. -.Dd March 20, 2026 +.Dd March 28, 2026 .Dt SRC.CONF 5 .Os .Sh NAME @@ -259,9 +259,20 @@ Set this if you do not want to build .Xr blocklistd 8 and .Xr blocklistctl 8 . +When set, it enforces these options: +.Pp +.Bl -item -compact +.It +.Va WITHOUT_BLACKLIST +.El +.Pp When set, these options are also in effect: .Pp .Bl -inset -compact +.It Va WITHOUT_BLACKLIST_SUPPORT +(unless +.Va WITH_BLACKLIST_SUPPORT +is set explicitly) .It Va WITHOUT_BLOCKLIST_SUPPORT (unless .Va WITH_BLOCKLIST_SUPPORT @@ -274,6 +285,12 @@ support, like .Xr fingerd 8 and .Xr sshd 8 . +When set, it enforces these options: +.Pp +.Bl -item -compact +.It +.Va WITHOUT_BLACKLIST_SUPPORT +.El .It Va WITHOUT_BLUETOOTH Do not build Bluetooth related kernel modules, programs and libraries. 
.It Va WITHOUT_BOOT diff --git a/share/man/man7/hier.7 b/share/man/man7/hier.7 index 6abce682b627..5482e5ea5c96 100644 --- a/share/man/man7/hier.7 +++ b/share/man/man7/hier.7 @@ -327,7 +327,7 @@ Explicitly distrusted certificates; see kernel state defaults; see .Xr sysctl.conf 5 .It Pa syslog.conf -system log configuration +system message log configuration .It Pa ttys tty creation configuration; see .Xr getty 8 @@ -851,7 +851,7 @@ default log for system daemons .It Pa devd.log default log for device state change daemon .It Pa dmesg.today -system message buffer log, rotates to +kernel message buffer log, rotates to .Pa dmesg.yesterday .It Pa debug.log undiscarded debug syslog messages @@ -862,7 +862,7 @@ logs for the line printer spooler daemon; see .Xr sendmail 8 log, rotates and compresses to maillog.0.bz2 .It Pa messages -general system log; see +general system message log; see .Xr syslogd 8 .It Pa mount.today currently loaded diff --git a/share/man/man7/security.7 b/share/man/man7/security.7 index 0685da5db437..4cbe4b3090b8 100644 --- a/share/man/man7/security.7 +++ b/share/man/man7/security.7 @@ -26,7 +26,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. 
.\" -.Dd June 1, 2024 +.Dd March 22, 2026 .Dt SECURITY 7 .Os .Sh NAME @@ -1099,6 +1099,7 @@ Enables mapping of simultaneously writable and executable pages for .Xr chflags 1 , .Xr find 1 , .Xr md5 1 , +.Xr mdo 1 , .Xr netstat 1 , .Xr openssl 1 , .Xr proccontrol 1 , diff --git a/share/man/man9/DEVICE_IDENTIFY.9 b/share/man/man9/DEVICE_IDENTIFY.9 index 31063ae60dff..564699b57a58 100644 --- a/share/man/man9/DEVICE_IDENTIFY.9 +++ b/share/man/man9/DEVICE_IDENTIFY.9 @@ -74,7 +74,7 @@ foo_identify(driver_t *driver, device_t parent) retrieve_device_information; if (devices matches one of your supported devices && - device_get_child(parent, "foo", DEVICE_UNIT_ANY) == NULL) { + device_find_child(parent, "foo", DEVICE_UNIT_ANY) == NULL) { child = BUS_ADD_CHILD(parent, 0, "foo", DEVICE_UNIT_ANY); bus_set_resource(child, SYS_RES_IOPORT, 0, FOO_IOADDR, 1); } diff --git a/share/mk/src.opts.mk b/share/mk/src.opts.mk index 1cadc8450de3..47538c138eb7 100644 --- a/share/mk/src.opts.mk +++ b/share/mk/src.opts.mk @@ -407,6 +407,14 @@ MK_BLOCKLIST:= no MK_BLOCKLIST_SUPPORT:= no .endif +.if ${MK_BLOCKLIST} == "no" +MK_BLACKLIST:= no +.endif + +.if ${MK_BLOCKLIST_SUPPORT} == "no" +MK_BLACKLIST_SUPPORT:= no +.endif + .if ${MK_CDDL} == "no" MK_CTF:= no MK_DTRACE:= no diff --git a/stand/defs.mk b/stand/defs.mk index b74dac395d15..7f3803c7ab42 100644 --- a/stand/defs.mk +++ b/stand/defs.mk @@ -11,6 +11,7 @@ FORTIFY_SOURCE= 0 MK_CTF= no MK_SSP= no MK_PIE= no +MK_RETPOLINE= no MK_ZEROREGS= no MAN= .if !defined(PIC) diff --git a/sys/amd64/amd64/exec_machdep.c b/sys/amd64/amd64/exec_machdep.c index 7d567c561c52..f81fc5f94406 100644 --- a/sys/amd64/amd64/exec_machdep.c +++ b/sys/amd64/amd64/exec_machdep.c @@ -143,7 +143,34 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 
1 : 0; - bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs)); + sf.sf_uc.uc_mcontext.mc_rdi = regs->tf_rdi; + sf.sf_uc.uc_mcontext.mc_rsi = regs->tf_rsi; + sf.sf_uc.uc_mcontext.mc_rdx = regs->tf_rdx; + sf.sf_uc.uc_mcontext.mc_rcx = regs->tf_rcx; + sf.sf_uc.uc_mcontext.mc_r8 = regs->tf_r8; + sf.sf_uc.uc_mcontext.mc_r9 = regs->tf_r9; + sf.sf_uc.uc_mcontext.mc_rax = regs->tf_rax; + sf.sf_uc.uc_mcontext.mc_rbx = regs->tf_rbx; + sf.sf_uc.uc_mcontext.mc_rbp = regs->tf_rbp; + sf.sf_uc.uc_mcontext.mc_r10 = regs->tf_r10; + sf.sf_uc.uc_mcontext.mc_r11 = regs->tf_r11; + sf.sf_uc.uc_mcontext.mc_r12 = regs->tf_r12; + sf.sf_uc.uc_mcontext.mc_r13 = regs->tf_r13; + sf.sf_uc.uc_mcontext.mc_r14 = regs->tf_r14; + sf.sf_uc.uc_mcontext.mc_r15 = regs->tf_r15; + sf.sf_uc.uc_mcontext.mc_trapno = regs->tf_trapno; + sf.sf_uc.uc_mcontext.mc_fs = regs->tf_fs; + sf.sf_uc.uc_mcontext.mc_gs = regs->tf_gs; + sf.sf_uc.uc_mcontext.mc_addr = regs->tf_addr; + sf.sf_uc.uc_mcontext.mc_flags = regs->tf_flags; + sf.sf_uc.uc_mcontext.mc_es = regs->tf_es; + sf.sf_uc.uc_mcontext.mc_ds = regs->tf_ds; + sf.sf_uc.uc_mcontext.mc_err = regs->tf_err; + sf.sf_uc.uc_mcontext.mc_rip = regs->tf_rip; + sf.sf_uc.uc_mcontext.mc_cs = regs->tf_cs; + sf.sf_uc.uc_mcontext.mc_rflags = regs->tf_rflags; + sf.sf_uc.uc_mcontext.mc_rsp = regs->tf_rsp; + sf.sf_uc.uc_mcontext.mc_ss = regs->tf_ss; sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ get_fpcontext(td, &sf.sf_uc.uc_mcontext, &xfpusave, &xfpusave_len); update_pcb_bases(pcb); diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c index e9bb27a54dfb..78199cb2ac03 100644 --- a/sys/amd64/amd64/machdep.c +++ b/sys/amd64/amd64/machdep.c @@ -1850,7 +1850,7 @@ wrmsr_early_safe_end(void) lidt(&wrmsr_early_safe_orig_efi_idt); gpf_descr = &idt0[IDT_GP]; - memset(gpf_descr, 0, sizeof(*gpf_descr)); + memset_early(gpf_descr, 0, sizeof(*gpf_descr)); } #ifdef KDB diff --git a/sys/amd64/conf/FIRECRACKER b/sys/amd64/conf/FIRECRACKER index 
07c02d51ed4c..676199602ede 100644 --- a/sys/amd64/conf/FIRECRACKER +++ b/sys/amd64/conf/FIRECRACKER @@ -29,7 +29,6 @@ options VIMAGE # Subsystem virtualization, e.g. VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 -options ROUTE_MPATH # Multipath routing support options FIB_ALGO # Modular fib lookups options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC index 07e31e354d45..7de19f86afbf 100644 --- a/sys/amd64/conf/GENERIC +++ b/sys/amd64/conf/GENERIC @@ -33,7 +33,6 @@ options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options IPSEC_OFFLOAD # Inline ipsec offload infra -options ROUTE_MPATH # Multipath routing support options FIB_ALGO # Modular fib lookups options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES index b1a6995e90eb..fac8d9314cad 100644 --- a/sys/amd64/conf/NOTES +++ b/sys/amd64/conf/NOTES @@ -71,6 +71,9 @@ device smartpqi # Broadcom MPT Fusion, version 4, is 64-bit only device mpi3mr # LSI-Logic MPT-Fusion 4 +# Universal Flash Storage Host Controller Interface support +device ufshci # UFS host controller + # # Network interfaces: # diff --git a/sys/amd64/vmm/x86.c b/sys/amd64/vmm/x86.c index f32107124eb8..6794110f067a 100644 --- a/sys/amd64/vmm/x86.c +++ b/sys/amd64/vmm/x86.c @@ -37,6 +37,7 @@ #include <machine/segments.h> #include <machine/specialreg.h> #include <machine/vmm.h> +#include <x86/bhyve.h> #include <dev/vmm/vmm_ktr.h> #include <dev/vmm/vmm_vm.h> @@ -50,12 +51,8 @@ static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, NULL); #define CPUID_VM_SIGNATURE 0x40000000 -#define CPUID_BHYVE_FEATURES 0x40000001 #define CPUID_VM_HIGH CPUID_BHYVE_FEATURES -/* Features advertised in 
CPUID_BHYVE_FEATURES %eax */ -#define CPUID_BHYVE_FEAT_EXT_DEST_ID (1UL << 0) /* MSI Extended Dest ID */ - static const char bhyve_id[12] = "bhyve bhyve "; static uint64_t bhyve_xcpuids; diff --git a/sys/arm64/conf/std.arm64 b/sys/arm64/conf/std.arm64 index 68ad7ebc856e..8069703626c9 100644 --- a/sys/arm64/conf/std.arm64 +++ b/sys/arm64/conf/std.arm64 @@ -14,7 +14,6 @@ options INET6 # IPv6 communications protocols options CC_CUBIC # include CUBIC congestion control options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options IPSEC_OFFLOAD # Inline ipsec offload infra -options ROUTE_MPATH # Multipath routing support options FIB_ALGO # Modular fib lookups options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging diff --git a/sys/compat/linuxkpi/common/include/linux/hardirq.h b/sys/compat/linuxkpi/common/include/linux/hardirq.h index f79451dd0d35..c6cbf1a34f14 100644 --- a/sys/compat/linuxkpi/common/include/linux/hardirq.h +++ b/sys/compat/linuxkpi/common/include/linux/hardirq.h @@ -31,6 +31,7 @@ #include <linux/types.h> #include <linux/lockdep.h> +#include <linux/preempt.h> #include <sys/param.h> #include <sys/bus.h> diff --git a/sys/compat/linuxkpi/common/include/linux/highmem.h b/sys/compat/linuxkpi/common/include/linux/highmem.h index 58a9cdcdf60f..dc1c4fe2f299 100644 --- a/sys/compat/linuxkpi/common/include/linux/highmem.h +++ b/sys/compat/linuxkpi/common/include/linux/highmem.h @@ -45,6 +45,7 @@ #include <linux/mm.h> #include <linux/page.h> +#include <linux/hardirq.h> #define PageHighMem(p) (0) diff --git a/sys/compat/linuxkpi/common/include/linux/spinlock.h b/sys/compat/linuxkpi/common/include/linux/spinlock.h index 63dc343d1461..a786cbab5e13 100644 --- a/sys/compat/linuxkpi/common/include/linux/spinlock.h +++ b/sys/compat/linuxkpi/common/include/linux/spinlock.h @@ -41,6 +41,7 @@ #include <linux/rwlock.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> +#include <linux/preempt.h> typedef struct mtx spinlock_t; diff 
--git a/sys/conf/NOTES b/sys/conf/NOTES index 98c63d9bb7f7..4dda93e2ee70 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -1031,8 +1031,6 @@ device dummymbuf # TCP_HHOOK enables the hhook(9) framework hooks for the TCP stack. # SOCKET_HHOOK enables the hhook(9) framework hooks for socket operations. # -# ROUTE_MPATH provides support for multipath routing. -# options MROUTING # Multicast routing options IPFIREWALL #firewall options IPFIREWALL_VERBOSE #enable logging to syslogd(8) @@ -1052,7 +1050,6 @@ options PF_DEFAULT_TO_DROP #drop everything by default options TCP_BLACKBOX options TCP_HHOOK options SOCKET_HHOOK -options ROUTE_MPATH # The MBUF_STRESS_TEST option enables options which create # various random failures / extreme cases related to mbuf @@ -2089,6 +2086,7 @@ device sound # snd_cs4281: Crystal Semiconductor CS4281 PCI. # snd_csa: Crystal Semiconductor CS461x/428x PCI. (except # 4281) +# snd_dummy: Dummy testing driver. # snd_emu10k1: Creative EMU10K1 PCI and EMU10K2 (Audigy) PCI. # snd_emu10kx: Creative SoundBlaster Live! and Audigy # snd_envy24: VIA Envy24 and compatible, needs snd_spicds. @@ -2118,6 +2116,7 @@ device snd_atiixp device snd_cmi device snd_cs4281 device snd_csa +device snd_dummy device snd_emu10k1 device snd_emu10kx device snd_envy24 @@ -2157,19 +2156,9 @@ envvar hint.gusc.0.flags="0x13" # # Following options are intended for debugging/testing purposes: # -# SND_DEBUG Enable extra debugging code that includes -# sanity checking and possible increase of -# verbosity. -# # SND_DIAGNOSTIC Similar in a spirit of INVARIANTS/DIAGNOSTIC, # zero tolerance against inconsistencies. # -# SND_FEEDER_MULTIFORMAT By default, only 16/32 bit feeders are compiled -# in. This options enable most feeder converters -# except for 8bit. WARNING: May bloat the kernel. -# -# SND_FEEDER_FULL_MULTIFORMAT Ditto, but includes 8bit feeders as well. 
-# # SND_FEEDER_RATE_HP (feeder_rate) High precision 64bit arithmetic # as much as possible (the default trying to # avoid it). Possible slowdown. @@ -2179,16 +2168,9 @@ envvar hint.gusc.0.flags="0x13" # integer/arithmetic. Slight increase of dynamic # range at a cost of possible slowdown. # -# SND_OLDSTEREO Only 2 channels are allowed, effectively -# disabling multichannel processing. -# -options SND_DEBUG options SND_DIAGNOSTIC -options SND_FEEDER_MULTIFORMAT -options SND_FEEDER_FULL_MULTIFORMAT options SND_FEEDER_RATE_HP options SND_PCM_64 -options SND_OLDSTEREO # # Cardbus diff --git a/sys/conf/files b/sys/conf/files index c6151b0b73cf..de13b76e71a4 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -3209,6 +3209,7 @@ dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/sndstat.c optional sound +dev/sound/dummy.c optional snd_dummy sound dev/spibus/acpi_spibus.c optional acpi spibus dev/spibus/ofw_spibus.c optional fdt spibus dev/spibus/spibus.c optional spibus \ @@ -3284,10 +3285,11 @@ dev/uart/uart_tty.c optional uart # Universal Flash Storage Host Controller Interface drivers # dev/ufshci/ufshci.c optional ufshci +dev/ufshci/ufshci_acpi.c optional ufshci acpi dev/ufshci/ufshci_ctrlr.c optional ufshci dev/ufshci/ufshci_ctrlr_cmd.c optional ufshci dev/ufshci/ufshci_dev.c optional ufshci -dev/ufshci/ufshci_pci.c optional ufshci +dev/ufshci/ufshci_pci.c optional ufshci pci dev/ufshci/ufshci_req_queue.c optional ufshci dev/ufshci/ufshci_req_sdb.c optional ufshci dev/ufshci/ufshci_sim.c optional ufshci @@ -4244,8 +4246,8 @@ net/debugnet_inet.c optional inet debugnet net/pfil.c optional ether | inet net/radix.c standard net/route.c standard -net/route/nhgrp.c optional route_mpath -net/route/nhgrp_ctl.c optional route_mpath +net/route/nhgrp.c standard +net/route/nhgrp_ctl.c standard net/route/nhop.c standard net/route/nhop_ctl.c standard net/route/nhop_utils.c standard @@ -4261,7 +4263,7 
@@ net/route/route_temporal.c standard net/rss_config.c standard net/rtsock.c standard net/slcompress.c optional netgraph_vjc -net/toeplitz.c optional inet | inet6 | route_mpath +net/toeplitz.c optional inet | inet6 net/vnet.c optional vimage net80211/ieee80211.c optional wlan net80211/ieee80211_acl.c optional wlan wlan_acl diff --git a/sys/conf/options b/sys/conf/options index 4aeb15a489ea..155fbf8e6c8a 100644 --- a/sys/conf/options +++ b/sys/conf/options @@ -473,7 +473,6 @@ MROUTING opt_mrouting.h NFSLOCKD NETLINK opt_global.h PF_DEFAULT_TO_DROP opt_pf.h -ROUTE_MPATH opt_route.h ROUTETABLES opt_route.h FIB_ALGO opt_route.h RSS opt_rss.h @@ -915,13 +914,9 @@ CFI_ARMEDANDDANGEROUS opt_cfi.h CFI_HARDWAREBYTESWAP opt_cfi.h # Sound options -SND_DEBUG opt_snd.h SND_DIAGNOSTIC opt_snd.h -SND_FEEDER_MULTIFORMAT opt_snd.h -SND_FEEDER_FULL_MULTIFORMAT opt_snd.h SND_FEEDER_RATE_HP opt_snd.h SND_PCM_64 opt_snd.h -SND_OLDSTEREO opt_snd.h X86BIOS diff --git a/sys/contrib/xen/arch-x86/cpuid.h b/sys/contrib/xen/arch-x86/cpuid.h index ce46305bee99..f2b2b3632c2d 100644 --- a/sys/contrib/xen/arch-x86/cpuid.h +++ b/sys/contrib/xen/arch-x86/cpuid.h @@ -102,6 +102,13 @@ #define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2) #define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */ #define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */ +/* + * With interrupt format set to 0 (non-remappable) bits 55:49 from the + * IO-APIC RTE and bits 11:5 from the MSI address can be used to store + * high bits for the Destination ID. This expands the Destination ID + * field from 8 to 15 bits, allowing to target APIC IDs up 32768. 
+ */ +#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5) /* * Leaf 6 (0x40000x05) diff --git a/sys/dev/acpica/acpi_spmc.c b/sys/dev/acpica/acpi_spmc.c index a961b4a188dc..d6d4f2d34f2f 100644 --- a/sys/dev/acpica/acpi_spmc.c +++ b/sys/dev/acpica/acpi_spmc.c @@ -1,7 +1,7 @@ /* * SPDX-License-Identifier: BSD-2-Clause * - * Copyright (c) 2024-2025 The FreeBSD Foundation + * Copyright (c) 2024-2026 The FreeBSD Foundation * * This software was developed by Aymeric Wibo <obiwac@freebsd.org> * under sponsorship from the FreeBSD Foundation. @@ -86,8 +86,9 @@ static struct dsm_set intel_dsm_set = { 0xc4eb40a0, 0x6cd2, 0x11e2, 0xbc, 0xfd, {0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66}, }, - .dsms_expected = DSM_GET_DEVICE_CONSTRAINTS | DSM_DISPLAY_OFF_NOTIF | - DSM_DISPLAY_ON_NOTIF | DSM_ENTRY_NOTIF | DSM_EXIT_NOTIF, + .dsms_expected = (1 << DSM_GET_DEVICE_CONSTRAINTS) | + (1 << DSM_DISPLAY_OFF_NOTIF) | (1 << DSM_DISPLAY_ON_NOTIF) | + (1 << DSM_ENTRY_NOTIF) | (1 << DSM_EXIT_NOTIF), }; SYSCTL_INT(_debug_acpi_spmc, OID_AUTO, intel_dsm_revision, CTLFLAG_RW, @@ -102,9 +103,10 @@ static struct dsm_set ms_dsm_set = { 0x11e00d56, 0xce64, 0x47ce, 0x83, 0x7b, {0x1f, 0x89, 0x8f, 0x9a, 0xa4, 0x61}, }, - .dsms_expected = DSM_DISPLAY_OFF_NOTIF | DSM_DISPLAY_ON_NOTIF | - DSM_ENTRY_NOTIF | DSM_EXIT_NOTIF | DSM_MODERN_ENTRY_NOTIF | - DSM_MODERN_EXIT_NOTIF, + .dsms_expected = (1 << DSM_DISPLAY_OFF_NOTIF) | + (1 << DSM_DISPLAY_ON_NOTIF) | (1 << DSM_ENTRY_NOTIF) | + (1 << DSM_EXIT_NOTIF) | (1 << DSM_MODERN_ENTRY_NOTIF) | + (1 << DSM_MODERN_EXIT_NOTIF), }; static struct dsm_set amd_dsm_set = { @@ -124,9 +126,9 @@ static struct dsm_set amd_dsm_set = { 0xe3f32452, 0xfebc, 0x43ce, 0x90, 0x39, {0x93, 0x21, 0x22, 0xd3, 0x77, 0x21}, }, - .dsms_expected = AMD_DSM_GET_DEVICE_CONSTRAINTS | AMD_DSM_ENTRY_NOTIF | - AMD_DSM_EXIT_NOTIF | AMD_DSM_DISPLAY_OFF_NOTIF | - AMD_DSM_DISPLAY_ON_NOTIF, + .dsms_expected = (1 << AMD_DSM_GET_DEVICE_CONSTRAINTS) | + (1 << AMD_DSM_ENTRY_NOTIF) | (1 << AMD_DSM_EXIT_NOTIF) | + (1 << 
AMD_DSM_DISPLAY_OFF_NOTIF) | (1 << AMD_DSM_DISPLAY_ON_NOTIF), }; SYSCTL_INT(_debug_acpi_spmc, OID_AUTO, amd_dsm_revision, CTLFLAG_RW, @@ -252,7 +254,7 @@ static void acpi_spmc_check_dsm_set(struct acpi_spmc_softc *sc, ACPI_HANDLE handle, struct dsm_set *dsm_set) { - const uint64_t dsms_supported = acpi_DSMQuery(handle, + uint64_t dsms_supported = acpi_DSMQuery(handle, (uint8_t *)&dsm_set->uuid, dsm_set->revision); /* @@ -261,6 +263,7 @@ acpi_spmc_check_dsm_set(struct acpi_spmc_softc *sc, ACPI_HANDLE handle, */ if ((dsms_supported & 1) == 0) return; + dsms_supported &= ~1; if ((dsms_supported & dsm_set->dsms_expected) != dsm_set->dsms_expected) { device_printf(sc->dev, "DSM set %s does not support expected " diff --git a/sys/dev/asmc/asmc.c b/sys/dev/asmc/asmc.c index 17a282ce0b97..4a6734e22786 100644 --- a/sys/dev/asmc/asmc.c +++ b/sys/dev/asmc/asmc.c @@ -58,6 +58,9 @@ #include <dev/acpica/acpivar.h> #include <dev/asmc/asmcvar.h> +#include <dev/backlight/backlight.h> +#include "backlight_if.h" + /* * Device interface. */ @@ -67,6 +70,15 @@ static int asmc_detach(device_t dev); static int asmc_resume(device_t dev); /* + * Backlight interface. + */ +static int asmc_backlight_update_status(device_t dev, + struct backlight_props *props); +static int asmc_backlight_get_status(device_t dev, + struct backlight_props *props); +static int asmc_backlight_get_info(device_t dev, struct backlight_info *info); + +/* * SMC functions. 
*/ static int asmc_init(device_t dev); @@ -581,6 +593,12 @@ static device_method_t asmc_methods[] = { DEVMETHOD(device_attach, asmc_attach), DEVMETHOD(device_detach, asmc_detach), DEVMETHOD(device_resume, asmc_resume), + + /* Backlight interface */ + DEVMETHOD(backlight_update_status, asmc_backlight_update_status), + DEVMETHOD(backlight_get_status, asmc_backlight_get_status), + DEVMETHOD(backlight_get_info, asmc_backlight_get_info), + DEVMETHOD_END }; @@ -606,8 +624,10 @@ static char *asmc_ids[] = { "APP0001", NULL }; static unsigned int light_control = 0; +ACPI_PNP_INFO(asmc_ids); DRIVER_MODULE(asmc, acpi, asmc_driver, NULL, NULL); MODULE_DEPEND(asmc, acpi, 1, 1, 1); +MODULE_DEPEND(asmc, backlight, 1, 1, 1); static const struct asmc_model * asmc_match(device_t dev) @@ -799,6 +819,13 @@ asmc_attach(device_t dev) CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, dev, 0, model->smc_light_control, "I", "Keyboard backlight brightness control"); + + sc->sc_kbd_bkl = backlight_register("asmc", dev); + if (sc->sc_kbd_bkl == NULL) { + device_printf(dev, "Can not register backlight\n"); + ret = ENXIO; + goto err; + } } if (model->smc_sms_x == NULL) @@ -881,6 +908,9 @@ asmc_detach(device_t dev) { struct asmc_softc *sc = device_get_softc(dev); + if (sc->sc_kbd_bkl != NULL) + backlight_destroy(sc->sc_kbd_bkl); + if (sc->sc_sms_tq) { taskqueue_drain(sc->sc_sms_tq, &sc->sc_sms_task); taskqueue_free(sc->sc_sms_tq); @@ -1738,6 +1768,7 @@ static int asmc_mbp_sysctl_light_control(SYSCTL_HANDLER_ARGS) { device_t dev = (device_t)arg1; + struct asmc_softc *sc = device_get_softc(dev); uint8_t buf[2]; int error; int v; @@ -1749,6 +1780,7 @@ asmc_mbp_sysctl_light_control(SYSCTL_HANDLER_ARGS) if (v < 0 || v > 255) return (EINVAL); light_control = v; + sc->sc_kbd_bkl_level = v * 100 / 255; buf[0] = light_control; buf[1] = 0x00; asmc_key_write(dev, ASMC_KEY_LIGHTVALUE, buf, sizeof(buf)); @@ -1816,3 +1848,38 @@ asmc_wol_sysctl(SYSCTL_HANDLER_ARGS) return (0); } + +static int 
+asmc_backlight_update_status(device_t dev, struct backlight_props *props) +{ + struct asmc_softc *sc = device_get_softc(dev); + uint8_t buf[2]; + + sc->sc_kbd_bkl_level = props->brightness; + light_control = props->brightness * 255 / 100; + buf[0] = light_control; + buf[1] = 0x00; + asmc_key_write(dev, ASMC_KEY_LIGHTVALUE, buf, sizeof(buf)); + + return (0); +} + +static int +asmc_backlight_get_status(device_t dev, struct backlight_props *props) +{ + struct asmc_softc *sc = device_get_softc(dev); + + props->brightness = sc->sc_kbd_bkl_level; + props->nlevels = 0; + + return (0); +} + +static int +asmc_backlight_get_info(device_t dev, struct backlight_info *info) +{ + info->type = BACKLIGHT_TYPE_KEYBOARD; + strlcpy(info->name, "Apple MacBook Keyboard", BACKLIGHTMAXNAMELENGTH); + + return (0); +} diff --git a/sys/dev/asmc/asmcvar.h b/sys/dev/asmc/asmcvar.h index 95a117f59533..cfc176559ed9 100644 --- a/sys/dev/asmc/asmcvar.h +++ b/sys/dev/asmc/asmcvar.h @@ -51,6 +51,8 @@ struct asmc_softc { struct taskqueue *sc_sms_tq; struct task sc_sms_task; uint8_t sc_sms_intr_works; + struct cdev *sc_kbd_bkl; + uint32_t sc_kbd_bkl_level; }; /* diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c index c72e68b8a62f..49e72c8ee14f 100644 --- a/sys/dev/dpaa2/dpaa2_ni.c +++ b/sys/dev/dpaa2/dpaa2_ni.c @@ -3004,6 +3004,9 @@ dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch, goto err_unload; } + bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE); + /* TODO: Enqueue several frames in a single command */ for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) { /* TODO: Return error codes instead of # of frames */ @@ -3013,9 +3016,6 @@ dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch, } } - bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE); - bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE); - if (rc != 1) { fq->chan->tx_dropped++; if_inc_counter(sc->ifp, 
IFCOUNTER_OERRORS, 1); diff --git a/sys/dev/evdev/evdev_utils.c b/sys/dev/evdev/evdev_utils.c index a075a9be9bb7..d7b7b790dc2c 100644 --- a/sys/dev/evdev/evdev_utils.c +++ b/sys/dev/evdev/evdev_utils.c @@ -92,8 +92,8 @@ static uint16_t evdev_usb_scancodes[256] = { NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, /* 0xc0 - 0xdf */ - NONE, NONE, NONE, NONE, - NONE, NONE, NONE, NONE, + KEY_BRIGHTNESSDOWN, KEY_BRIGHTNESSUP, KEY_SCALE, KEY_DASHBOARD, + KEY_KBDILLUMDOWN, KEY_KBDILLUMUP, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, @@ -108,7 +108,12 @@ static uint16_t evdev_usb_scancodes[256] = { KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP, KEY_FIND, KEY_SCROLLUP, KEY_SCROLLDOWN, KEY_EDIT, KEY_SLEEP, KEY_COFFEE, KEY_REFRESH, KEY_CALC, - NONE, NONE, NONE, NONE, + /* + * last item maps to APPLE_FN_KEY in hkbd.c. using KEY_WAKEUP instead + * of KEY_FN as evdev translates the latter to too high of a code for + * xkb to parse. + */ + NONE, NONE, NONE, KEY_WAKEUP, }; diff --git a/sys/dev/hid/hgame.c b/sys/dev/hid/hgame.c index 8dde6b5550c9..693c5afee034 100644 --- a/sys/dev/hid/hgame.c +++ b/sys/dev/hid/hgame.c @@ -134,28 +134,20 @@ hgame_dpad_cb(HIDMAP_CB_ARGS) data = ctx.data; switch (HIDMAP_CB_UDATA64) { case HUG_D_PAD_UP: - if (sc->dpad_down) - return (ENOMSG); - evdev_push_abs(evdev, ABS_HAT0Y, (data == 0) ? 0 : -1); sc->dpad_up = (data != 0); + evdev_push_abs(evdev, ABS_HAT0Y, sc->dpad_down - sc->dpad_up); break; case HUG_D_PAD_DOWN: - if (sc->dpad_up) - return (ENOMSG); - evdev_push_abs(evdev, ABS_HAT0Y, (data == 0) ? 0 : 1); sc->dpad_down = (data != 0); + evdev_push_abs(evdev, ABS_HAT0Y, sc->dpad_down - sc->dpad_up); break; case HUG_D_PAD_RIGHT: - if (sc->dpad_left) - return (ENOMSG); - evdev_push_abs(evdev, ABS_HAT0X, (data == 0) ? 
0 : 1); sc->dpad_right = (data != 0); + evdev_push_abs(evdev, ABS_HAT0X, sc->dpad_right - sc->dpad_left); break; case HUG_D_PAD_LEFT: - if (sc->dpad_right) - return (ENOMSG); - evdev_push_abs(evdev, ABS_HAT0X, (data == 0) ? 0 : -1); sc->dpad_left = (data != 0); + evdev_push_abs(evdev, ABS_HAT0X, sc->dpad_right - sc->dpad_left); break; } break; diff --git a/sys/dev/hid/hid.h b/sys/dev/hid/hid.h index e56f8ffe772b..02709d549a56 100644 --- a/sys/dev/hid/hid.h +++ b/sys/dev/hid/hid.h @@ -57,8 +57,10 @@ #define HUP_SCALE 0x008c #define HUP_CAMERA_CONTROL 0x0090 #define HUP_ARCADE 0x0091 +#define HUP_APPLE 0x00ff #define HUP_FIDO 0xf1d0 #define HUP_MICROSOFT 0xff00 +#define HUP_HP 0xff01 /* Usages, generic desktop */ #define HUG_POINTER 0x0001 diff --git a/sys/dev/hid/hkbd.c b/sys/dev/hid/hkbd.c index 6255c42d3b62..c98f4be69169 100644 --- a/sys/dev/hid/hkbd.c +++ b/sys/dev/hid/hkbd.c @@ -73,6 +73,8 @@ #include <dev/hid/hidquirk.h> #include <dev/hid/hidrdesc.h> +#include "usbdevs.h" + #ifdef EVDEV_SUPPORT #include <dev/evdev/input.h> #include <dev/evdev/evdev.h> @@ -97,6 +99,7 @@ static int hkbd_debug = 0; #endif static int hkbd_no_leds = 0; +static int hkbd_apple_fn_mode = 0; static SYSCTL_NODE(_hw_hid, OID_AUTO, hkbd, CTLFLAG_RW, 0, "USB keyboard"); #ifdef HID_DEBUG @@ -105,6 +108,8 @@ SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, debug, CTLFLAG_RWTUN, #endif SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN, &hkbd_no_leds, 0, "Disables setting of keyboard leds"); +SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, apple_fn_mode, CTLFLAG_RWTUN, + &hkbd_apple_fn_mode, 0, "0 = Fn + F1..12 -> media, 1 = F1..F12 -> media"); #define INPUT_EPOCH global_epoch_preempt @@ -126,6 +131,10 @@ SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN, #define MOD_MIN 0xe0 #define MOD_MAX 0xe7 +/* check evdev_usb_scancodes[] for names */ +#define APPLE_FN_KEY 0xff +#define APPLE_EJECT_KEY 0xec + struct hkbd_softc { device_t sc_dev; @@ -289,9 +298,9 @@ static const uint8_t hkbd_trtab[256] = { NN, NN, 
NN, NN, NN, NN, NN, NN, /* D0 - D7 */ NN, NN, NN, NN, NN, NN, NN, NN, /* D8 - DF */ 29, 42, 56, 105, 90, 54, 93, 106, /* E0 - E7 */ - NN, NN, NN, NN, NN, NN, NN, NN, /* E8 - EF */ + NN, NN, NN, NN, 254, NN, NN, NN, /* E8 - EF */ NN, NN, NN, NN, NN, NN, NN, NN, /* F0 - F7 */ - NN, NN, NN, NN, NN, NN, NN, NN, /* F8 - FF */ + NN, NN, NN, NN, NN, NN, NN, 255, /* F8 - FF */ }; static const uint8_t hkbd_boot_desc[] = { HID_KBD_BOOTPROTO_DESCR() }; @@ -516,13 +525,14 @@ hkbd_interrupt(struct hkbd_softc *sc) continue; hkbd_put_key(sc, key | KEY_PRESS); - sc->sc_co_basetime = sbinuptime(); - sc->sc_delay = sc->sc_kbd.kb_delay1; - hkbd_start_timer(sc); - - /* set repeat time for last key */ - sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1; - sc->sc_repeat_key = key; + if (key != APPLE_FN_KEY) { + sc->sc_co_basetime = sbinuptime(); + sc->sc_delay = sc->sc_kbd.kb_delay1; + hkbd_start_timer(sc); + /* set repeat time for last key */ + sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1; + sc->sc_repeat_key = key; + } } /* synchronize old data with new data */ @@ -613,6 +623,16 @@ static uint32_t hkbd_apple_fn(uint32_t keycode) { switch (keycode) { + case 0x0b: return 0x50; /* H -> LEFT ARROW */ + case 0x0d: return 0x51; /* J -> DOWN ARROW */ + case 0x0e: return 0x52; /* K -> UP ARROW */ + case 0x0f: return 0x4f; /* L -> RIGHT ARROW */ + case 0x36: return 0x4a; /* COMMA -> HOME */ + case 0x37: return 0x4d; /* DOT -> END */ + case 0x18: return 0x4b; /* U -> PGUP */ + case 0x07: return 0x4e; /* D -> PGDN */ + case 0x16: return 0x47; /* S -> SCROLLLOCK */ + case 0x13: return 0x46; /* P -> SYSRQ/PRTSC */ case 0x28: return 0x49; /* RETURN -> INSERT */ case 0x2a: return 0x4c; /* BACKSPACE -> DEL */ case 0x50: return 0x4a; /* LEFT ARROW -> HOME */ @@ -623,6 +643,27 @@ hkbd_apple_fn(uint32_t keycode) } } +/* separate so the sysctl doesn't butcher non-fn keys */ +static uint32_t +hkbd_apple_fn_media(uint32_t keycode) +{ + switch (keycode) { + case 0x3a: return 0xc0; /* F1 -> BRIGHTNESS DOWN 
*/ + case 0x3b: return 0xc1; /* F2 -> BRIGHTNESS UP */ + case 0x3c: return 0xc2; /* F3 -> SCALE (MISSION CTRL)*/ + case 0x3d: return 0xc3; /* F4 -> DASHBOARD (LAUNCHPAD) */ + case 0x3e: return 0xc4; /* F5 -> KBD BACKLIGHT DOWN */ + case 0x3f: return 0xc5; /* F6 -> KBD BACKLIGHT UP */ + case 0x40: return 0xea; /* F7 -> MEDIA PREV */ + case 0x41: return 0xe8; /* F8 -> PLAY/PAUSE */ + case 0x42: return 0xeb; /* F9 -> MEDIA NEXT */ + case 0x43: return 0xef; /* F10 -> MUTE */ + case 0x44: return 0xee; /* F11 -> VOLUME DOWN */ + case 0x45: return 0xed; /* F12 -> VOLUME UP */ + default: return keycode; + } +} + static uint32_t hkbd_apple_swap(uint32_t keycode) { @@ -675,18 +716,30 @@ hkbd_intr_callback(void *context, void *data, hid_size_t len) /* clear modifiers */ modifiers = 0; - /* scan through HID data */ + /* scan through HID data and expose magic apple keys */ if ((sc->sc_flags & HKBD_FLAG_APPLE_EJECT) && (id == sc->sc_id_apple_eject)) { - if (hid_get_data(buf, len, &sc->sc_loc_apple_eject)) + if (hid_get_data(buf, len, &sc->sc_loc_apple_eject)) { + bit_set(sc->sc_ndata, APPLE_EJECT_KEY); modifiers |= MOD_EJECT; + } else { + bit_clear(sc->sc_ndata, APPLE_EJECT_KEY); + } } if ((sc->sc_flags & HKBD_FLAG_APPLE_FN) && (id == sc->sc_id_apple_fn)) { - if (hid_get_data(buf, len, &sc->sc_loc_apple_fn)) + if (hid_get_data(buf, len, &sc->sc_loc_apple_fn)) { + bit_set(sc->sc_ndata, APPLE_FN_KEY); modifiers |= MOD_FN; + } else { + bit_clear(sc->sc_ndata, APPLE_FN_KEY); + } } + int apply_apple_fn_media = (modifiers & MOD_FN) ? 
1 : 0; + if (hkbd_apple_fn_mode) /* toggle from sysctl value */ + apply_apple_fn_media = !apply_apple_fn_media; + bit_foreach(sc->sc_loc_key_valid, HKBD_NKEYCODE, i) { if (id != sc->sc_id_loc_key[i]) { continue; /* invalid HID ID */ @@ -710,6 +763,8 @@ hkbd_intr_callback(void *context, void *data, hid_size_t len) } if (modifiers & MOD_FN) key = hkbd_apple_fn(key); + if (apply_apple_fn_media) + key = hkbd_apple_fn_media(key); if (sc->sc_flags & HKBD_FLAG_APPLE_SWAP) key = hkbd_apple_swap(key); if (key == KEY_NONE || key >= HKBD_NKEYCODE) @@ -723,6 +778,8 @@ hkbd_intr_callback(void *context, void *data, hid_size_t len) if (modifiers & MOD_FN) key = hkbd_apple_fn(key); + if (apply_apple_fn_media) + key = hkbd_apple_fn_media(key); if (sc->sc_flags & HKBD_FLAG_APPLE_SWAP) key = hkbd_apple_swap(key); if (key == KEY_NONE || key == KEY_ERROR || key >= HKBD_NKEYCODE) @@ -783,25 +840,43 @@ hkbd_parse_hid(struct hkbd_softc *sc, const uint8_t *ptr, uint32_t len, sc->sc_kbd_size = hid_report_size_max(ptr, len, hid_input, &sc->sc_kbd_id); + const struct hid_device_info *hw = hid_get_device_info(sc->sc_dev); + /* investigate if this is an Apple Keyboard */ - if (hidbus_locate(ptr, len, - HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT), - hid_input, tlc_index, 0, &sc->sc_loc_apple_eject, &flags, - &sc->sc_id_apple_eject, NULL)) { - if (flags & HIO_VARIABLE) - sc->sc_flags |= HKBD_FLAG_APPLE_EJECT | - HKBD_FLAG_APPLE_SWAP; - DPRINTFN(1, "Found Apple eject-key\n"); - } - if (hidbus_locate(ptr, len, - HID_USAGE2(0xFFFF, 0x0003), - hid_input, tlc_index, 0, &sc->sc_loc_apple_fn, &flags, - &sc->sc_id_apple_fn, NULL)) { - if (flags & HIO_VARIABLE) - sc->sc_flags |= HKBD_FLAG_APPLE_FN; - DPRINTFN(1, "Found Apple FN-key\n"); + if (hw->idVendor == USB_VENDOR_APPLE) { /* belt & braces! 
*/ + if (hidbus_locate(ptr, len, + HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT), + hid_input, tlc_index, 0, &sc->sc_loc_apple_eject, &flags, + &sc->sc_id_apple_eject, NULL)) { + if (flags & HIO_VARIABLE) + sc->sc_flags |= HKBD_FLAG_APPLE_EJECT | + HKBD_FLAG_APPLE_SWAP; + DPRINTFN(1, "Found Apple eject-key\n"); + } + /* + * check the same vendor pages that linux does to find the one + * apple uses for the function key. + */ + static const uint16_t apple_pages[] = { + HUP_APPLE, /* HID_UP_CUSTOM in linux */ + HUP_MICROSOFT, /* HID_UP_MSVENDOR in linux */ + HUP_HP, /* HID_UP_HPVENDOR2 in linux */ + 0xFFFF /* Original FreeBSD check (Remove?) */ + }; + for (int i = 0; i < (int)nitems(apple_pages); i++) { + if (hidbus_locate(ptr, len, + HID_USAGE2(apple_pages[i], 0x0003), + hid_input, tlc_index, 0, &sc->sc_loc_apple_fn, &flags, + &sc->sc_id_apple_fn, NULL)) { + if (flags & HIO_VARIABLE) + sc->sc_flags |= HKBD_FLAG_APPLE_FN; + DPRINTFN(1, "Found Apple FN-key on page 0x%04x\n", + apple_pages[i]); + break; + } + } } - + /* figure out event buffer */ if (hidbus_locate(ptr, len, HID_USAGE2(HUP_KEYBOARD, 0x00), diff --git a/sys/dev/hwpmc/hwpmc_ibs.c b/sys/dev/hwpmc/hwpmc_ibs.c index 66d3260cf040..bfc135f06884 100644 --- a/sys/dev/hwpmc/hwpmc_ibs.c +++ b/sys/dev/hwpmc/hwpmc_ibs.c @@ -1,8 +1,7 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * - * Copyright (c) 2026, Ali Jose Mashtizadeh - * All rights reserved. + * Copyright (c) 2026, Netflix, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/sys/dev/hwpmc/hwpmc_ibs.h b/sys/dev/hwpmc/hwpmc_ibs.h index 4449b44c8368..c66d54672543 100644 --- a/sys/dev/hwpmc/hwpmc_ibs.h +++ b/sys/dev/hwpmc/hwpmc_ibs.h @@ -1,8 +1,7 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * - * Copyright (c) 2026, Ali Jose Mashtizadeh - * All rights reserved. + * Copyright (c) 2026, Netflix, Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -67,6 +66,18 @@ #define IBS_CTL_LVTOFFSETVALID (1ULL << 8) #define IBS_CTL_LVTOFFSETMASK 0x0000000F +/* + * The minimum sampling rate was selected to match the default used by other + * counters that was also found to be experimentally stable by providing enough + * time between consecutive NMIs. The maximum sample rate is determined by + * setting all available counter bits, i.e., all available bits except the + * bottom four that are zero extended. + */ +#define IBS_FETCH_MIN_RATE 65536 +#define IBS_FETCH_MAX_RATE 1048560 +#define IBS_OP_MIN_RATE 65536 +#define IBS_OP_MAX_RATE 134217712 + /* IBS Fetch Control */ #define IBS_FETCH_CTL 0xC0011030 /* IBS Fetch Control */ #define IBS_FETCH_CTL_L3MISS (1ULL << 61) /* L3 Cache Miss */ @@ -82,7 +93,8 @@ #define IBS_FETCH_CTL_ENABLE (1ULL << 48) /* Enable */ #define IBS_FETCH_CTL_MAXCNTMASK 0x0000FFFFULL -#define IBS_FETCH_CTL_TO_LAT(_c) ((_c >> 32) & 0x0000FFFF) +#define IBS_FETCH_INTERVAL_TO_CTL(_c) (((_c) >> 4) & 0x0000FFFF) +#define IBS_FETCH_CTL_TO_LAT(_c) (((_c) >> 32) & 0x0000FFFF) #define IBS_FETCH_LINADDR 0xC0011031 /* Fetch Linear Address */ #define IBS_FETCH_PHYSADDR 0xC0011032 /* Fetch Physical Address */ @@ -95,12 +107,16 @@ /* IBS Execution Control */ #define IBS_OP_CTL 0xC0011033 /* IBS Execution Control */ +#define IBS_OP_CTL_LATFLTEN (1ULL << 63) /* Load Latency Filtering */ #define IBS_OP_CTL_COUNTERCONTROL (1ULL << 19) /* Counter Control */ #define IBS_OP_CTL_VALID (1ULL << 18) /* Valid */ #define IBS_OP_CTL_ENABLE (1ULL << 17) /* Enable */ #define IBS_OP_CTL_L3MISSONLY (1ULL << 16) /* L3 Miss Filtering */ #define IBS_OP_CTL_MAXCNTMASK 0x0000FFFFULL +#define IBS_OP_CTL_LDLAT_TO_CTL(_c) ((((ldlat) >> 7) - 1) << 59) +#define IBS_OP_INTERVAL_TO_CTL(_c) ((((_c) >> 4) & 0x0000FFFFULL) | ((_c) & 0x07F00000)) + #define IBS_OP_RIP 0xC0011034 /* IBS Op RIP */ #define 
IBS_OP_DATA 0xC0011035 /* IBS Op Data */ #define IBS_OP_DATA_RIPINVALID (1ULL << 38) /* RIP Invalid */ diff --git a/sys/dev/hwpmc/hwpmc_intel.c b/sys/dev/hwpmc/hwpmc_intel.c index e1788a9ea409..942cadfae4cf 100644 --- a/sys/dev/hwpmc/hwpmc_intel.c +++ b/sys/dev/hwpmc/hwpmc_intel.c @@ -247,6 +247,12 @@ pmc_intel_initialize(void) cputype = PMC_CPU_INTEL_ATOM_TREMONT; nclasses = 3; break; + case 0xAA: + case 0xAC: + case 0xB5: + cputype = PMC_CPU_INTEL_METEOR_LAKE; + nclasses = 3; + break; case 0xAD: case 0xAE: cputype = PMC_CPU_INTEL_GRANITE_RAPIDS; diff --git a/sys/dev/hwpmc/hwpmc_logging.c b/sys/dev/hwpmc/hwpmc_logging.c index 8fd7ef06a977..4f507523b6ab 100644 --- a/sys/dev/hwpmc/hwpmc_logging.c +++ b/sys/dev/hwpmc/hwpmc_logging.c @@ -58,6 +58,7 @@ #include <sys/uio.h> #include <sys/unistd.h> #include <sys/vnode.h> +#include <sys/syslog.h> #if defined(__i386__) || defined(__amd64__) #include <machine/clock.h> @@ -1236,24 +1237,39 @@ pmclog_initialize(void) struct pmclog_buffer *plb; int domain, ncpus, total; - if (pmclog_buffer_size <= 0 || pmclog_buffer_size > 16*1024) { - (void) printf("hwpmc: tunable logbuffersize=%d must be " - "greater than zero and less than or equal to 16MB.\n", - pmclog_buffer_size); + if (pmclog_buffer_size <= 0 || + pmclog_buffer_size > PMC_LOG_BUFFER_SIZE_MAX) { + log(LOG_WARNING, + "hwpmc: logbuffersize=%d must be greater than zero " + "and less than or equal to %d, resetting to %d\n", + pmclog_buffer_size, PMC_LOG_BUFFER_SIZE_MAX, + PMC_LOG_BUFFER_SIZE); + pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; } if (pmc_nlogbuffers_pcpu <= 0) { - (void) printf("hwpmc: tunable nlogbuffers=%d must be greater " - "than zero.\n", pmc_nlogbuffers_pcpu); + log(LOG_WARNING, + "hwpmc: nbuffers_pcpu=%d must be greater than zero, " + "resetting to %d\n", + pmc_nlogbuffers_pcpu, PMC_NLOGBUFFERS_PCPU); + pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; } - if (pmc_nlogbuffers_pcpu*pmclog_buffer_size > 32*1024) { - (void) printf("hwpmc: memory allocated pcpu must 
be less than 32MB (is %dK).\n", - pmc_nlogbuffers_pcpu*pmclog_buffer_size); + + if (pmc_nlogbuffers_pcpu * pmclog_buffer_size > + PMC_NLOGBUFFERS_PCPU_MEM_MAX) { + log(LOG_WARNING, + "hwpmc: nbuffers_pcpu=%d * logbuffersize=%d exceeds " + "%dMB per CPU limit, resetting to defaults (%d * %d)\n", + pmc_nlogbuffers_pcpu, pmclog_buffer_size, + PMC_NLOGBUFFERS_PCPU_MEM_MAX / 1024, + PMC_NLOGBUFFERS_PCPU, PMC_LOG_BUFFER_SIZE); + pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; } + for (domain = 0; domain < vm_ndomains; domain++) { ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus; total = ncpus * pmc_nlogbuffers_pcpu; @@ -1270,6 +1286,7 @@ pmclog_initialize(void) pmc_plb_rele_unlocked(plb); } } + mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF); } diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c index fb1fdf832398..6133b52b516f 100644 --- a/sys/dev/hwpmc/hwpmc_mod.c +++ b/sys/dev/hwpmc/hwpmc_mod.c @@ -818,11 +818,9 @@ pmc_force_context_switch(void) uint64_t pmc_rdtsc(void) { -#if defined(__i386__) || defined(__amd64__) - if (__predict_true(amd_feature & AMDID_RDTSCP)) - return (rdtscp()); - else - return (rdtsc()); +#if defined(__i386__) + /* Unfortunately get_cyclecount on i386 uses cpu_ticks. 
*/ + return (rdtsc()); #else return (get_cyclecount()); #endif diff --git a/sys/dev/hyperv/vmbus/x86/hyperv_reg.h b/sys/dev/hyperv/vmbus/x86/hyperv_reg.h index 0597a1fea953..e7560d00f25e 100644 --- a/sys/dev/hyperv/vmbus/x86/hyperv_reg.h +++ b/sys/dev/hyperv/vmbus/x86/hyperv_reg.h @@ -45,4 +45,10 @@ #define CPUID_LEAF_HV_IDENTITY 0x40000002 #define CPUID_LEAF_HV_FEATURES 0x40000003 #define CPUID_LEAF_HV_RECOMMENDS 0x40000004 + +#define CPUID_LEAF_HV_STACK_INTERFACE 0x40000081 +#define HYPERV_STACK_INTERFACE_EAX_SIG 0x31235356 /* "VS#1" */ +#define CPUID_LEAF_HV_STACK_PROPERTIES 0x40000082 +#define HYPERV_PROPERTIES_EXT_DEST_ID 0x00000004 + #endif /* !_HYPERV_REG_H_ */ diff --git a/sys/dev/mlx5/driver.h b/sys/dev/mlx5/driver.h index cdefe7e013f6..ba6714c5c7b6 100644 --- a/sys/dev/mlx5/driver.h +++ b/sys/dev/mlx5/driver.h @@ -890,6 +890,7 @@ struct mlx5_cmd_work_ent { u16 op; u8 busy; bool polling; + struct work_struct freew; }; struct mlx5_pas { diff --git a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c index 86c721a83cb7..e314a04c294f 100644 --- a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c +++ b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c @@ -802,6 +802,15 @@ static void cb_timeout_handler(struct work_struct *work) mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS); } +static void +cmd_free_work(struct work_struct *work) +{ + struct mlx5_cmd_work_ent *ent = container_of(work, + struct mlx5_cmd_work_ent, freew); + + free_cmd(ent); +} + static void complete_command(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd *cmd = ent->cmd; @@ -856,7 +865,8 @@ static void complete_command(struct mlx5_cmd_work_ent *ent) free_msg(dev, ent->in); err = err ? 
err : ent->status; - free_cmd(ent); + INIT_WORK(&ent->freew, cmd_free_work); + schedule_work(&ent->freew); callback(err, context); } else { complete(&ent->done); diff --git a/sys/dev/nvmf/controller/nvmft_controller.c b/sys/dev/nvmf/controller/nvmft_controller.c index 1618c1f96dac..4c1b28b89265 100644 --- a/sys/dev/nvmf/controller/nvmft_controller.c +++ b/sys/dev/nvmf/controller/nvmft_controller.c @@ -227,7 +227,7 @@ nvmft_handoff_io_queue(struct nvmft_port *np, enum nvmf_trtype trtype, if (ctrlr == NULL) { mtx_unlock(&np->lock); printf("NVMFT: Nonexistent controller %u for I/O queue %u from %.*s\n", - ctrlr->cntlid, qid, (int)sizeof(data->hostnqn), + cntlid, qid, (int)sizeof(data->hostnqn), data->hostnqn); nvmft_connect_invalid_parameters(qp, cmd, true, offsetof(struct nvmf_fabric_connect_data, cntlid)); diff --git a/sys/dev/rge/if_rge.c b/sys/dev/rge/if_rge.c index e5297edfefbe..0007b07e0fa6 100644 --- a/sys/dev/rge/if_rge.c +++ b/sys/dev/rge/if_rge.c @@ -959,29 +959,24 @@ rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) case SIOCSIFFLAGS: RGE_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) != 0) { - if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { - /* - * TODO: handle promisc/iffmulti changing - * without reprogramming everything. 
- */ + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + if (((if_getflags(ifp) ^ sc->rge_if_flags) + & (IFF_PROMISC | IFF_ALLMULTI)) != 0) + rge_iff_locked(sc); + } else rge_init_locked(sc); - } else { - /* Reinit promisc/multi just in case */ - rge_iff_locked(sc); - } } else { - if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) rge_stop_locked(sc); - } } + sc->rge_if_flags = if_getflags(ifp); RGE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RGE_LOCK(sc); - if ((if_getflags(ifp) & IFF_DRV_RUNNING) != 0) { + if ((if_getflags(ifp) & IFF_DRV_RUNNING) != 0) rge_iff_locked(sc); - } RGE_UNLOCK(sc); break; case SIOCGIFMEDIA: @@ -2104,9 +2099,10 @@ rge_rxeof(struct rge_queues *q, struct mbufq *mq) uint32_t rxstat, extsts; int i, mlen, rx = 0; int cons, prod; - int maxpkt = 16; /* XXX TODO: make this a tunable */ + int maxpkt; bool check_hwcsum; + maxpkt = sc->sc_rx_process_limit; check_hwcsum = ((if_getcapenable(sc->sc_ifp) & IFCAP_RXCSUM) != 0); RGE_ASSERT_LOCKED(sc); diff --git a/sys/dev/rge/if_rge_sysctl.c b/sys/dev/rge/if_rge_sysctl.c index a7d6e1572168..16001b4c1d94 100644 --- a/sys/dev/rge/if_rge_sysctl.c +++ b/sys/dev/rge/if_rge_sysctl.c @@ -232,6 +232,11 @@ rge_sysctl_attach(struct rge_softc *sc) "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); + sc->sc_rx_process_limit = 16; + SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "rx_process_limit", CTLFLAG_RW, &sc->sc_rx_process_limit, 0, + "max number of RX packets to process per interrupt"); + /* Stats */ rge_sysctl_drv_stats_attach(sc); rge_sysctl_mac_stats_attach(sc); diff --git a/sys/dev/rge/if_rgevar.h b/sys/dev/rge/if_rgevar.h index 6228f9ff229e..89d02e8acb72 100644 --- a/sys/dev/rge/if_rgevar.h +++ b/sys/dev/rge/if_rgevar.h @@ -200,8 +200,12 @@ struct rge_softc { #define RGE_IMTYPE_SIM 1 int sc_watchdog; + int rge_if_flags; + uint32_t sc_debug; + int sc_rx_process_limit; + struct rge_drv_stats sc_drv_stats; struct 
rge_mac_stats sc_mac_stats; diff --git a/sys/dev/sound/midi/midi.c b/sys/dev/sound/midi/midi.c index 4fd0e3dcf134..e70f76a44ed9 100644 --- a/sys/dev/sound/midi/midi.c +++ b/sys/dev/sound/midi/midi.c @@ -421,7 +421,7 @@ midi_write(struct cdev *i_dev, struct uio *uio, int ioflag) int used; char buf[MIDI_WSIZE]; - retval = 0; + retval = EIO; if (m == NULL) goto err0; diff --git a/sys/dev/sound/pcm/ac97.c b/sys/dev/sound/pcm/ac97.c index 14ff2f6a62ab..73a1e0280e56 100644 --- a/sys/dev/sound/pcm/ac97.c +++ b/sys/dev/sound/pcm/ac97.c @@ -125,12 +125,7 @@ static const struct ac97_vendorid ac97vendorid[] = { { 0x57454300, "Winbond" }, { 0x574d4c00, "Wolfson" }, { 0x594d4800, "Yamaha" }, - /* - * XXX This is a fluke, really! The real vendor - * should be SigmaTel, not this! This should be - * removed someday! - */ - { 0x01408300, "Creative" }, + { 0x01408300, "SigmaTel" }, { 0x00000000, NULL } }; @@ -238,12 +233,7 @@ static struct ac97_codecid ac97codecid[] = { { 0x594d4800, 0x00, 0, "YMF743", 0 }, { 0x594d4802, 0x00, 0, "YMF752", 0 }, { 0x594d4803, 0x00, 0, "YMF753", 0 }, - /* - * XXX This is a fluke, really! The real codec - * should be STAC9704, not this! This should be - * removed someday! 
- */ - { 0x01408384, 0x00, 0, "EV1938", 0 }, + { 0x01408384, 0x00, 0, "STAC9704", 0 }, { 0, 0, 0, NULL, 0 } }; @@ -1104,10 +1094,6 @@ ac97mix_uninit(struct snd_mixer *m) if (codec == NULL) return -1; - /* - if (ac97_uninitmixer(codec)) - return -1; - */ ac97_destroy(codec); return 0; } diff --git a/sys/dev/sound/pcm/buffer.c b/sys/dev/sound/pcm/buffer.c index 1db9e5661dc8..0c574ae2908c 100644 --- a/sys/dev/sound/pcm/buffer.c +++ b/sys/dev/sound/pcm/buffer.c @@ -506,29 +506,11 @@ sndbuf_dispose(struct snd_dbuf *b, u_int8_t *to, unsigned int count) return 0; } -#ifdef SND_DIAGNOSTIC -static uint32_t snd_feeder_maxfeed = 0; -SYSCTL_UINT(_hw_snd, OID_AUTO, feeder_maxfeed, CTLFLAG_RD, - &snd_feeder_maxfeed, 0, "maximum feeder count request"); - -static uint32_t snd_feeder_maxcycle = 0; -SYSCTL_UINT(_hw_snd, OID_AUTO, feeder_maxcycle, CTLFLAG_RD, - &snd_feeder_maxcycle, 0, "maximum feeder cycle"); -#endif - /* count is number of bytes we want added to destination buffer */ int sndbuf_feed(struct snd_dbuf *from, struct snd_dbuf *to, struct pcm_channel *channel, struct pcm_feeder *feeder, unsigned int count) { unsigned int cnt, maxfeed; -#ifdef SND_DIAGNOSTIC - unsigned int cycle; - - if (count > snd_feeder_maxfeed) - snd_feeder_maxfeed = count; - - cycle = 0; -#endif KASSERT(count > 0, ("can't feed 0 bytes")); @@ -544,16 +526,8 @@ sndbuf_feed(struct snd_dbuf *from, struct snd_dbuf *to, struct pcm_channel *chan break; sndbuf_acquire(to, to->tmpbuf, cnt); count -= cnt; -#ifdef SND_DIAGNOSTIC - cycle++; -#endif } while (count != 0); -#ifdef SND_DIAGNOSTIC - if (cycle > snd_feeder_maxcycle) - snd_feeder_maxcycle = cycle; -#endif - return (0); } diff --git a/sys/dev/sound/pcm/channel.c b/sys/dev/sound/pcm/channel.c index b74f76fd21ca..67bbfba28177 100644 --- a/sys/dev/sound/pcm/channel.c +++ b/sys/dev/sound/pcm/channel.c @@ -143,7 +143,7 @@ chn_vpc_proc(int reset, int db) PCM_ACQUIRE(d); CHN_FOREACH(c, d, channels.pcm) { CHN_LOCK(c); - CHN_SETVOLUME(c, SND_VOL_C_PCM, 
SND_CHN_T_VOL_0DB, db); + chn_setvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_VOL_0DB, db); if (reset != 0) chn_vpc_reset(c, SND_VOL_C_PCM, 1); CHN_UNLOCK(c); @@ -1144,7 +1144,6 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls, int dir, void *devinfo) { struct pcm_channel *c; - struct feeder_class *fc; struct snd_dbuf *b, *bs; char buf[CHN_NAMELEN]; int err, i, direction, *vchanrate, *vchanformat; @@ -1217,17 +1216,6 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls, chn_vpc_reset(c, SND_VOL_C_PCM, 1); CHN_UNLOCK(c); - fc = feeder_getclass(FEEDER_ROOT); - if (fc == NULL) { - device_printf(d->dev, "%s(): failed to get feeder class\n", - __func__); - goto fail; - } - if (feeder_add(c, fc, NULL)) { - device_printf(d->dev, "%s(): failed to add feeder\n", __func__); - goto fail; - } - b = sndbuf_create(c, "primary"); bs = sndbuf_create(c, "secondary"); if (b == NULL || bs == NULL) { @@ -1647,7 +1635,7 @@ chn_vpc_reset(struct pcm_channel *c, int vc, int force) return; for (i = SND_CHN_T_BEGIN; i <= SND_CHN_T_END; i += SND_CHN_T_STEP) - CHN_SETVOLUME(c, vc, i, c->volume[vc][SND_CHN_T_VOL_0DB]); + chn_setvolume_matrix(c, vc, i, c->volume[vc][SND_CHN_T_VOL_0DB]); } static u_int32_t diff --git a/sys/dev/sound/pcm/channel.h b/sys/dev/sound/pcm/channel.h index 6415f5c88984..f964480369a1 100644 --- a/sys/dev/sound/pcm/channel.h +++ b/sys/dev/sound/pcm/channel.h @@ -255,7 +255,6 @@ struct pcm_channel { #include "channel_if.h" -int chn_reinit(struct pcm_channel *c); int chn_write(struct pcm_channel *c, struct uio *buf); int chn_read(struct pcm_channel *c, struct uio *buf); u_int32_t chn_start(struct pcm_channel *c, int force); @@ -306,15 +305,6 @@ int chn_notify(struct pcm_channel *c, u_int32_t flags); int chn_getrates(struct pcm_channel *c, int **rates); int chn_syncdestroy(struct pcm_channel *c); -#define CHN_SETVOLUME(...) 
chn_setvolume_matrix(__VA_ARGS__) -#if defined(SND_DIAGNOSTIC) || defined(INVARIANTS) -#define CHN_GETVOLUME(...) chn_getvolume_matrix(__VA_ARGS__) -#else -#define CHN_GETVOLUME(x, y, z) ((x)->volume[y][z]) -#endif - -#define CHN_GETMUTE(x, y, z) ((x)->muted[y][z]) - #ifdef OSSV4_EXPERIMENT int chn_getpeaks(struct pcm_channel *c, int *lpeak, int *rpeak); #endif diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c index c1e836691ac7..797bfba81023 100644 --- a/sys/dev/sound/pcm/dsp.c +++ b/sys/dev/sound/pcm/dsp.c @@ -607,8 +607,9 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *ch, case MIXER_READ(0): switch (j) { case SOUND_MIXER_MUTE: - mute = CHN_GETMUTE(ch, SND_VOL_C_PCM, SND_CHN_T_FL) || - CHN_GETMUTE(ch, SND_VOL_C_PCM, SND_CHN_T_FR); + mute = chn_getmute_matrix(ch, + SND_VOL_C_PCM, SND_CHN_T_FL) || + chn_getmute_matrix(ch, SND_VOL_C_PCM, SND_CHN_T_FR); if (ch->direction == PCMDIR_REC) { *(int *)arg = mute << SOUND_MIXER_RECLEV; } else { @@ -618,17 +619,17 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *ch, case SOUND_MIXER_PCM: if (ch->direction != PCMDIR_PLAY) break; - *(int *)arg = CHN_GETVOLUME(ch, + *(int *)arg = chn_getvolume_matrix(ch, SND_VOL_C_PCM, SND_CHN_T_FL); - *(int *)arg |= CHN_GETVOLUME(ch, + *(int *)arg |= chn_getvolume_matrix(ch, SND_VOL_C_PCM, SND_CHN_T_FR) << 8; break; case SOUND_MIXER_RECLEV: if (ch->direction != PCMDIR_REC) break; - *(int *)arg = CHN_GETVOLUME(ch, + *(int *)arg = chn_getvolume_matrix(ch, SND_VOL_C_PCM, SND_CHN_T_FL); - *(int *)arg |= CHN_GETVOLUME(ch, + *(int *)arg |= chn_getvolume_matrix(ch, SND_VOL_C_PCM, SND_CHN_T_FR) << 8; break; case SOUND_MIXER_DEVMASK: diff --git a/sys/dev/sound/pcm/feeder.h b/sys/dev/sound/pcm/feeder.h index 1f106787ee83..834df463295d 100644 --- a/sys/dev/sound/pcm/feeder.h +++ b/sys/dev/sound/pcm/feeder.h @@ -163,21 +163,3 @@ int feeder_matrix_oss_get_channel_order(struct pcmchan_matrix *, unsigned long long *); int 
feeder_matrix_oss_set_channel_order(struct pcmchan_matrix *, unsigned long long *); - -/* - * By default, various feeders only deal with sign 16/32 bit native-endian - * since it should provide the fastest processing path. Processing 8bit samples - * is too noisy due to limited dynamic range, while 24bit is quite slow due to - * unnatural per-byte read/write. However, for debugging purposes, ensuring - * implementation correctness and torture test, the following can be defined: - * - * SND_FEEDER_MULTIFORMAT - Compile all type of converters, but force - * 8bit samples to be converted to 16bit - * native-endian for better dynamic range. - * Process 24bit samples natively. - * SND_FEEDER_FULL_MULTIFORMAT - Ditto, but process 8bit samples natively. - */ -#ifdef SND_FEEDER_FULL_MULTIFORMAT -#undef SND_FEEDER_MULTIFORMAT -#define SND_FEEDER_MULTIFORMAT 1 -#endif diff --git a/sys/dev/sound/pcm/feeder_chain.c b/sys/dev/sound/pcm/feeder_chain.c index 4ec50d810253..4fc846f77496 100644 --- a/sys/dev/sound/pcm/feeder_chain.c +++ b/sys/dev/sound/pcm/feeder_chain.c @@ -66,13 +66,7 @@ struct feeder_chain_desc { #define FEEDER_CHAIN_FULLMULTI 4 #define FEEDER_CHAIN_LAST 5 -#if defined(SND_FEEDER_FULL_MULTIFORMAT) #define FEEDER_CHAIN_DEFAULT FEEDER_CHAIN_FULLMULTI -#elif defined(SND_FEEDER_MULTIFORMAT) -#define FEEDER_CHAIN_DEFAULT FEEDER_CHAIN_MULTI -#else -#define FEEDER_CHAIN_DEFAULT FEEDER_CHAIN_LEAN -#endif /* * List of preferred formats that might be required during @@ -126,7 +120,7 @@ static uint32_t *feeder_chain_formats[FEEDER_CHAIN_LAST] = { static int feeder_chain_mode = FEEDER_CHAIN_DEFAULT; -#if defined(_KERNEL) && defined(SND_DEBUG) && defined(SND_FEEDER_FULL_MULTIFORMAT) +#if defined(_KERNEL) SYSCTL_INT(_hw_snd, OID_AUTO, feeder_chain_mode, CTLFLAG_RWTUN, &feeder_chain_mode, 0, "feeder chain mode " @@ -589,12 +583,8 @@ feeder_chain(struct pcm_channel *c) case FEEDER_CHAIN_LEAN: case FEEDER_CHAIN_16: case FEEDER_CHAIN_32: -#if defined(SND_FEEDER_MULTIFORMAT) || 
defined(SND_FEEDER_FULL_MULTIFORMAT) case FEEDER_CHAIN_MULTI: -#endif -#if defined(SND_FEEDER_FULL_MULTIFORMAT) case FEEDER_CHAIN_FULLMULTI: -#endif break; default: feeder_chain_mode = FEEDER_CHAIN_DEFAULT; diff --git a/sys/dev/sound/pcm/feeder_mixer.c b/sys/dev/sound/pcm/feeder_mixer.c index 8c58e1c8ef33..be78b0cffb64 100644 --- a/sys/dev/sound/pcm/feeder_mixer.c +++ b/sys/dev/sound/pcm/feeder_mixer.c @@ -43,9 +43,6 @@ #include "snd_fxdiv_gen.h" #endif -#undef SND_FEEDER_MULTIFORMAT -#define SND_FEEDER_MULTIFORMAT 1 - struct feed_mixer_info { uint32_t format; uint32_t channels; @@ -174,14 +171,6 @@ feed_mixer_rec(struct pcm_channel *c) CHN_UNLOCK(ch); continue; } -#ifdef SND_DEBUG - if ((c->flags & CHN_F_DIRTY) && VCHAN_SYNC_REQUIRED(ch)) { - if (vchan_sync(ch) != 0) { - CHN_UNLOCK(ch); - continue; - } - } -#endif bs = ch->bufsoft; if (ch->flags & CHN_F_MMAP) sndbuf_dispose(bs, NULL, sndbuf_getready(bs)); @@ -270,14 +259,6 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b, CHN_UNLOCK(ch); continue; } -#ifdef SND_DEBUG - if ((c->flags & CHN_F_DIRTY) && VCHAN_SYNC_REQUIRED(ch)) { - if (vchan_sync(ch) != 0) { - CHN_UNLOCK(ch); - continue; - } - } -#endif if ((ch->flags & CHN_F_MMAP) && !(ch->flags & CHN_F_CLOSING)) sndbuf_acquire(ch->bufsoft, NULL, sndbuf_getfree(ch->bufsoft)); diff --git a/sys/dev/sound/pcm/feeder_rate.c b/sys/dev/sound/pcm/feeder_rate.c index aee164840c4a..173f7811f547 100644 --- a/sys/dev/sound/pcm/feeder_rate.c +++ b/sys/dev/sound/pcm/feeder_rate.c @@ -89,21 +89,6 @@ #define Z_RATE_DEFAULT 48000 -#define Z_RATE_MIN FEEDRATE_RATEMIN -#define Z_RATE_MAX FEEDRATE_RATEMAX -#define Z_ROUNDHZ FEEDRATE_ROUNDHZ -#define Z_ROUNDHZ_MIN FEEDRATE_ROUNDHZ_MIN -#define Z_ROUNDHZ_MAX FEEDRATE_ROUNDHZ_MAX - -#define Z_RATE_SRC FEEDRATE_SRC -#define Z_RATE_DST FEEDRATE_DST -#define Z_RATE_QUALITY FEEDRATE_QUALITY -#define Z_RATE_CHANNELS FEEDRATE_CHANNELS - -#define Z_PARANOID 1 - -#define Z_MULTIFORMAT 1 - #ifdef _KERNEL #undef 
Z_USE_ALPHADRIFT #define Z_USE_ALPHADRIFT 1 @@ -151,9 +136,9 @@ struct z_info { z_resampler_t z_resample; }; -int feeder_rate_min = Z_RATE_MIN; -int feeder_rate_max = Z_RATE_MAX; -int feeder_rate_round = Z_ROUNDHZ; +int feeder_rate_min = FEEDRATE_RATEMIN; +int feeder_rate_max = FEEDRATE_RATEMAX; +int feeder_rate_round = FEEDRATE_ROUNDHZ; int feeder_rate_quality = Z_QUALITY_DEFAULT; static int feeder_rate_polyphase_max = Z_POLYPHASE_MAX; @@ -222,10 +207,10 @@ sysctl_hw_snd_feeder_rate_round(SYSCTL_HANDLER_ARGS) if (err != 0 || req->newptr == NULL || val == feeder_rate_round) return (err); - if (val < Z_ROUNDHZ_MIN || val > Z_ROUNDHZ_MAX) + if (val < FEEDRATE_ROUNDHZ_MIN || val > FEEDRATE_ROUNDHZ_MAX) return (EINVAL); - feeder_rate_round = val - (val % Z_ROUNDHZ); + feeder_rate_round = val - (val % FEEDRATE_ROUNDHZ); return (0); } @@ -622,15 +607,10 @@ z_feed_sinc_polyphase_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \ Z_DECLARE_SINC(SIGN, BIT, ENDIAN) \ Z_DECLARE_SINC_POLYPHASE(SIGN, BIT, ENDIAN) -#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) Z_DECLARE(S, 16, LE) Z_DECLARE(S, 32, LE) -#endif -#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) Z_DECLARE(S, 16, BE) Z_DECLARE(S, 32, BE) -#endif -#ifdef SND_FEEDER_MULTIFORMAT Z_DECLARE(S, 8, NE) Z_DECLARE(S, 24, LE) Z_DECLARE(S, 24, BE) @@ -643,7 +623,6 @@ Z_DECLARE(U, 24, BE) Z_DECLARE(U, 32, BE) Z_DECLARE(F, 32, LE) Z_DECLARE(F, 32, BE) -#endif enum { Z_RESAMPLER_ZOH, @@ -672,15 +651,10 @@ static const struct { uint32_t format; z_resampler_t resampler[Z_RESAMPLER_LAST]; } z_resampler_tab[] = { -#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) Z_RESAMPLER_ENTRY(S, 16, LE), Z_RESAMPLER_ENTRY(S, 32, LE), -#endif -#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) Z_RESAMPLER_ENTRY(S, 16, BE), Z_RESAMPLER_ENTRY(S, 32, BE), -#endif -#ifdef SND_FEEDER_MULTIFORMAT Z_RESAMPLER_ENTRY(S, 8, NE), Z_RESAMPLER_ENTRY(S, 24, LE), Z_RESAMPLER_ENTRY(S, 24, 
BE), @@ -693,7 +667,6 @@ static const struct { Z_RESAMPLER_ENTRY(U, 32, BE), Z_RESAMPLER_ENTRY(F, 32, LE), Z_RESAMPLER_ENTRY(F, 32, BE), -#endif }; #define Z_RESAMPLER_TAB_SIZE \ @@ -728,7 +701,6 @@ z_resampler_reset(struct z_info *info) info->quality = Z_QUALITY_MAX; } -#ifdef Z_PARANOID static int32_t z_resampler_sinc_len(struct z_info *info) { @@ -766,9 +738,6 @@ z_resampler_sinc_len(struct z_info *info) return (len); } -#else -#define z_resampler_sinc_len(i) (Z_IS_SINC(i) ? Z_SINC_LEN(i) : 1) -#endif #define Z_POLYPHASE_COEFF_SHIFT 0 @@ -1422,21 +1391,21 @@ z_resampler_set(struct pcm_feeder *f, int what, int32_t value) info = f->data; switch (what) { - case Z_RATE_SRC: + case FEEDRATE_SRC: if (value < feeder_rate_min || value > feeder_rate_max) return (E2BIG); if (value == info->rsrc) return (0); info->rsrc = value; break; - case Z_RATE_DST: + case FEEDRATE_DST: if (value < feeder_rate_min || value > feeder_rate_max) return (E2BIG); if (value == info->rdst) return (0); info->rdst = value; break; - case Z_RATE_QUALITY: + case FEEDRATE_QUALITY: if (value < Z_QUALITY_MIN || value > Z_QUALITY_MAX) return (EINVAL); if (value == info->quality) @@ -1453,7 +1422,7 @@ z_resampler_set(struct pcm_feeder *f, int what, int32_t value) return (0); info->quality = oquality; break; - case Z_RATE_CHANNELS: + case FEEDRATE_CHANNELS: if (value < SND_CHN_MIN || value > SND_CHN_MAX) return (EINVAL); if (value == info->channels) @@ -1475,13 +1444,13 @@ z_resampler_get(struct pcm_feeder *f, int what) info = f->data; switch (what) { - case Z_RATE_SRC: + case FEEDRATE_SRC: return (info->rsrc); - case Z_RATE_DST: + case FEEDRATE_DST: return (info->rdst); - case Z_RATE_QUALITY: + case FEEDRATE_QUALITY: return (info->quality); - case Z_RATE_CHANNELS: + case FEEDRATE_CHANNELS: return (info->channels); } diff --git a/sys/dev/sound/pcm/feeder_volume.c b/sys/dev/sound/pcm/feeder_volume.c index e43b2594c7e0..5f40816b4065 100644 --- a/sys/dev/sound/pcm/feeder_volume.c +++ 
b/sys/dev/sound/pcm/feeder_volume.c @@ -74,15 +74,10 @@ feed_volume_##SIGN##BIT##ENDIAN(int *vol, int *matrix, \ } while (--count != 0); \ } -#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) FEEDVOLUME_DECLARE(S, 16, LE) FEEDVOLUME_DECLARE(S, 32, LE) -#endif -#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) FEEDVOLUME_DECLARE(S, 16, BE) FEEDVOLUME_DECLARE(S, 32, BE) -#endif -#ifdef SND_FEEDER_MULTIFORMAT FEEDVOLUME_DECLARE(S, 8, NE) FEEDVOLUME_DECLARE(S, 24, LE) FEEDVOLUME_DECLARE(S, 24, BE) @@ -95,7 +90,6 @@ FEEDVOLUME_DECLARE(U, 24, BE) FEEDVOLUME_DECLARE(U, 32, BE) FEEDVOLUME_DECLARE(F, 32, LE) FEEDVOLUME_DECLARE(F, 32, BE) -#endif struct feed_volume_info { uint32_t bps, channels; @@ -115,15 +109,10 @@ static const struct { uint32_t format; feed_volume_t apply; } feed_volume_info_tab[] = { -#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) FEEDVOLUME_ENTRY(S, 16, LE), FEEDVOLUME_ENTRY(S, 32, LE), -#endif -#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT) FEEDVOLUME_ENTRY(S, 16, BE), FEEDVOLUME_ENTRY(S, 32, BE), -#endif -#ifdef SND_FEEDER_MULTIFORMAT FEEDVOLUME_ENTRY(S, 8, NE), FEEDVOLUME_ENTRY(S, 24, LE), FEEDVOLUME_ENTRY(S, 24, BE), @@ -136,7 +125,6 @@ static const struct { FEEDVOLUME_ENTRY(U, 32, BE), FEEDVOLUME_ENTRY(F, 32, LE), FEEDVOLUME_ENTRY(F, 32, BE), -#endif }; #define FEEDVOLUME_TAB_SIZE ((int32_t) \ diff --git a/sys/dev/sound/pcm/matrix.h b/sys/dev/sound/pcm/matrix.h index e2798c651536..ffac162f41a1 100644 --- a/sys/dev/sound/pcm/matrix.h +++ b/sys/dev/sound/pcm/matrix.h @@ -29,11 +29,6 @@ #ifndef _SND_MATRIX_H_ #define _SND_MATRIX_H_ -#undef SND_MULTICHANNEL -#ifndef SND_OLDSTEREO -#define SND_MULTICHANNEL 1 -#endif - /* * XXX = unused, but part of the definition (will be used someday, maybe). 
*/ @@ -176,18 +171,12 @@ #define SND_CHN_T_END SND_CHN_T_TBR #define SND_CHN_T_STEP 1 #define SND_CHN_MIN 1 - -#ifdef SND_MULTICHANNEL #define SND_CHN_MAX 8 -#else -#define SND_CHN_MAX 2 -#endif /* * Multichannel interleaved volume matrix. Each calculated value relative * to master and 0db will be stored in each CLASS + 1 as long as - * chn_setvolume_matrix() or the equivalent CHN_SETVOLUME() macros is - * used (see channel.c). + * chn_setvolume_matrix() is used (see channel.c). */ #define SND_VOL_C_MASTER 0 #define SND_VOL_C_PCM 1 diff --git a/sys/dev/sound/pcm/mixer.c b/sys/dev/sound/pcm/mixer.c index 55b61ccb4911..6ed2d0c3ce5c 100644 --- a/sys/dev/sound/pcm/mixer.c +++ b/sys/dev/sound/pcm/mixer.c @@ -1142,9 +1142,9 @@ mixer_ioctl_channel_proc: center = (left + right) >> 1; chn_setvolume_multi(c, SND_VOL_C_PCM, left, right, center); } else if ((cmd & ~0xff) == MIXER_READ(0)) { - *(int *)arg = CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FL); + *(int *)arg = chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FL); *(int *)arg |= - CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FR) << 8; + chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FR) << 8; } CHN_UNLOCK(c); diff --git a/sys/dev/sound/sndstat.c b/sys/dev/sound/sndstat.c index b0ac7f7d0824..c28a932c784e 100644 --- a/sys/dev/sound/sndstat.c +++ b/sys/dev/sound/sndstat.c @@ -487,9 +487,9 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip) c->feedcount); nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_XRUNS, c->xruns); nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_LEFTVOL, - CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FL)); + chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FL)); nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_RIGHTVOL, - CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FR)); + chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FR)); nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_FORMAT, c->bufhard->fmt); nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_RATE, diff --git 
a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c index 7f49bae9ce5e..dd8aa8c9fafe 100644 --- a/sys/dev/sound/usb/uaudio.c +++ b/sys/dev/sound/usb/uaudio.c @@ -155,7 +155,6 @@ SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, debug, CTLFLAG_RWTUN, #define MAKE_WORD(h,l) (((h) << 8) | (l)) #define BIT_TEST(bm,bno) (((bm)[(bno) / 8] >> (7 - ((bno) % 8))) & 1) -#define UAUDIO_MAX_CHAN(x) (x) #define MIX(sc) ((sc)->sc_mixer_node) union uaudio_asid { @@ -556,9 +555,9 @@ static int umidi_open(struct usb_fifo *, int); static int umidi_ioctl(struct usb_fifo *, u_long cmd, void *, int); static void umidi_close(struct usb_fifo *, int); static void umidi_init(device_t dev); -static int umidi_probe(device_t dev); +static int umidi_attach(device_t dev); static int umidi_detach(device_t dev); -static int uaudio_hid_probe(struct uaudio_softc *sc, +static int uaudio_hid_attach(struct uaudio_softc *sc, struct usb_attach_arg *uaa); static void uaudio_hid_detach(struct uaudio_softc *sc); @@ -1101,7 +1100,7 @@ uaudio_attach(device_t dev) } if (sc->sc_midi_chan.valid) { - if (umidi_probe(dev)) { + if (umidi_attach(dev)) { goto detach; } device_printf(dev, "MIDI sequencer.\n"); @@ -1138,7 +1137,7 @@ uaudio_attach(device_t dev) bus_attach_children(dev); if (uaudio_handle_hid) { - if (uaudio_hid_probe(sc, uaa) == 0) { + if (uaudio_hid_attach(sc, uaa) == 0) { device_printf(dev, "HID volume keys found.\n"); } else { device_printf(dev, "No HID volume keys found.\n"); @@ -1993,7 +1992,7 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev, uint16_t wFormat; wFormat = UGETW(asid.v1->wFormatTag); - bChannels = UAUDIO_MAX_CHAN(asf1d.v1->bNrChannels); + bChannels = asf1d.v1->bNrChannels; bBitResolution = asf1d.v1->bSubFrameSize * 8; if (asf1d.v1->bSamFreqType == 0) { @@ -2074,8 +2073,7 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev, else chan_alt->usb_cfg = uaudio_cfg_play; - chan_alt->sample_size = (UAUDIO_MAX_CHAN(channels) * - 
p_fmt->bPrecision) / 8; + chan_alt->sample_size = (channels * p_fmt->bPrecision) / 8; chan_alt->channels = channels; if (ep_dir == UE_DIR_IN && @@ -5805,9 +5803,7 @@ tr_setup: } } - chan->curr_cable++; - if (chan->curr_cable >= chan->max_emb_jack) - chan->curr_cable = 0; + chan->curr_cable %= chan->max_emb_jack; if (chan->curr_cable == start_cable) { if (tr_any == 0) @@ -5987,7 +5983,7 @@ static struct usb_fifo_methods umidi_fifo_methods = { }; static int -umidi_probe(device_t dev) +umidi_attach(device_t dev) { struct uaudio_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); @@ -6174,7 +6170,7 @@ tr_setup: } static int -uaudio_hid_probe(struct uaudio_softc *sc, +uaudio_hid_attach(struct uaudio_softc *sc, struct usb_attach_arg *uaa) { void *d_ptr; diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c index 22af8ee8663c..b0d285e3c603 100644 --- a/sys/dev/uart/uart_bus_pci.c +++ b/sys/dev/uart/uart_bus_pci.c @@ -297,7 +297,7 @@ uart_pci_probe(device_t dev) } if (pci_get_class(dev) == PCIC_SIMPLECOMM && pci_get_subclass(dev) == PCIS_SIMPLECOMM_UART && - pci_get_progif(dev) < PCIP_SIMPLECOMM_UART_16550A) { + pci_get_progif(dev) <= PCIP_SIMPLECOMM_UART_16550A) { /* XXX rclk what to do */ id = &cid; sc->sc_class = &uart_ns8250_class; @@ -339,7 +339,8 @@ uart_pci_attach(device_t dev) * suggests this is only reliable when one MSI vector is advertised. */ id = uart_pci_match(dev, pci_ns8250_ids); - if ((id == NULL || (id->rid & PCI_NO_MSI) == 0) && + /* Always disable MSI for generic devices. */ + if (id != NULL && (id->rid & PCI_NO_MSI) == 0 && pci_msi_count(dev) == 1) { count = 1; if (pci_alloc_msi(dev, &count) == 0) { diff --git a/sys/dev/ufshci/ufshci_acpi.c b/sys/dev/ufshci/ufshci_acpi.c new file mode 100644 index 000000000000..94da0d3cb411 --- /dev/null +++ b/sys/dev/ufshci/ufshci_acpi.c @@ -0,0 +1,248 @@ +/*- + * Copyright (c) 2026, Samsung Electronics Co., Ltd. 
+ * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/buf.h> +#include <sys/bus.h> +#include <sys/conf.h> +#include <sys/proc.h> +#include <sys/smp.h> + +#include <vm/vm.h> + +#include <contrib/dev/acpica/include/acpi.h> + +#include <dev/acpica/acpivar.h> + +#include "ufshci_private.h" + +static int ufshci_acpi_probe(device_t); +static int ufshci_acpi_attach(device_t); +static int ufshci_acpi_detach(device_t); +static int ufshci_acpi_suspend(device_t); +static int ufshci_acpi_resume(device_t); + +static device_method_t ufshci_acpi_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ufshci_acpi_probe), + DEVMETHOD(device_attach, ufshci_acpi_attach), + DEVMETHOD(device_detach, ufshci_acpi_detach), + DEVMETHOD(device_suspend, ufshci_acpi_suspend), + DEVMETHOD(device_resume, ufshci_acpi_resume), { 0, 0 } +}; + +static driver_t ufshci_acpi_driver = { + "ufshci", + ufshci_acpi_methods, + sizeof(struct ufshci_controller), +}; + +DRIVER_MODULE(ufshci, acpi, ufshci_acpi_driver, 0, 0); +MODULE_DEPEND(ufshci, acpi, 1, 1, 1); + +static struct ufshci_acpi_device { + const char *hid; + const char *desc; + uint32_t ref_clk; + uint32_t quirks; +} ufshci_acpi_devices[] = { + { "QCOM24A5", "Qualcomm Snapdragon X Elite UFS Host Controller", + UFSHCI_REF_CLK_19_2MHz, + UFSHCI_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH | + UFSHCI_QUIRK_BROKEN_LSDBS_MCQS_CAP }, + { 0x00000000, NULL, 0, 0 } +}; + +static char *ufshci_acpi_ids[] = { "QCOM24A5", NULL }; + +static const struct ufshci_acpi_device * +ufshci_acpi_find_device(device_t dev) +{ + char *hid; + int i; + int rv; + + rv = ACPI_ID_PROBE(device_get_parent(dev), dev, ufshci_acpi_ids, &hid); + if (rv > 0) + return (NULL); + + for (i = 0; ufshci_acpi_devices[i].hid != NULL; i++) { + if (strcmp(ufshci_acpi_devices[i].hid, hid) != 0) + continue; + return (&ufshci_acpi_devices[i]); + } + + return (NULL); +} + +static int +ufshci_acpi_probe(device_t 
dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + const struct ufshci_acpi_device *acpi_dev; + + acpi_dev = ufshci_acpi_find_device(dev); + if (acpi_dev == NULL) + return (ENXIO); + + if (acpi_dev->hid) { + ctrlr->quirks = acpi_dev->quirks; + ctrlr->ref_clk = acpi_dev->ref_clk; + } + + if (acpi_dev->desc) { + device_set_desc(dev, acpi_dev->desc); + return (BUS_PROBE_DEFAULT); + } + + return (ENXIO); +} + +static int +ufshci_acpi_allocate_memory(struct ufshci_controller *ctrlr) +{ + ctrlr->resource_id = 0; + ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, + &ctrlr->resource_id, RF_ACTIVE); + + if (ctrlr->resource == NULL) { + ufshci_printf(ctrlr, "unable to allocate acpi resource\n"); + return (ENOMEM); + } + + ctrlr->bus_tag = rman_get_bustag(ctrlr->resource); + ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource); + ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle; + + return (0); +} + +static int +ufshci_acpi_setup_shared(struct ufshci_controller *ctrlr) +{ + int error; + + ctrlr->num_io_queues = 1; + ctrlr->rid = 0; + ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, + &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE); + if (ctrlr->res == NULL) { + ufshci_printf(ctrlr, "unable to allocate shared interrupt\n"); + return (ENOMEM); + } + + error = bus_setup_intr(ctrlr->dev, ctrlr->res, + INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler, + ctrlr, &ctrlr->tag); + if (error) { + ufshci_printf(ctrlr, "unable to setup shared interrupt\n"); + return (error); + } + + return (0); +} + +static int +ufshci_acpi_setup_interrupts(struct ufshci_controller *ctrlr) +{ + int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq; + + /* + * TODO: Need to implement MCQ(Multi Circular Queue) + * Example: num_io_queues = mp_ncpus; + */ + num_io_queues = 1; + TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues); + if (num_io_queues < 1 || num_io_queues > mp_ncpus) + num_io_queues = mp_ncpus; + + per_cpu_io_queues 
= 1; + TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues); + if (per_cpu_io_queues == 0) + num_io_queues = 1; + + min_cpus_per_ioq = smp_threads_per_core; + TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq); + if (min_cpus_per_ioq > 1) { + num_io_queues = min(num_io_queues, + max(1, mp_ncpus / min_cpus_per_ioq)); + } + + if (num_io_queues > vm_ndomains) + num_io_queues -= num_io_queues % vm_ndomains; + + ctrlr->num_io_queues = num_io_queues; + return (ufshci_acpi_setup_shared(ctrlr)); +} + +static int +ufshci_acpi_attach(device_t dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + int status; + + ctrlr->dev = dev; + status = ufshci_acpi_allocate_memory(ctrlr); + if (status != 0) + goto bad; + + status = ufshci_acpi_setup_interrupts(ctrlr); + if (status != 0) + goto bad; + + return (ufshci_attach(dev)); +bad: + if (ctrlr->resource != NULL) { + bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, + ctrlr->resource); + } + + if (ctrlr->tag) + bus_teardown_intr(dev, ctrlr->res, ctrlr->tag); + + if (ctrlr->res) + bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), + ctrlr->res); + + return (status); +} + +static int +ufshci_acpi_detach(device_t dev) +{ + return (ufshci_detach(dev)); +} + +static int +ufshci_acpi_suspend(device_t dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + int error; + + error = bus_generic_suspend(dev); + if (error) + return (error); + + /* Currently, PCI-based ufshci only supports POWER_STYPE_STANDBY */ + error = ufshci_ctrlr_suspend(ctrlr, POWER_STYPE_STANDBY); + return (error); +} + +static int +ufshci_acpi_resume(device_t dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + int error; + + error = ufshci_ctrlr_resume(ctrlr, POWER_STYPE_AWAKE); + if (error) + return (error); + + error = bus_generic_resume(dev); + return (error); +} diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c index f011d03189e0..244aa723d02a 
100644 --- a/sys/dev/ufshci/ufshci_ctrlr.c +++ b/sys/dev/ufshci/ufshci_ctrlr.c @@ -21,6 +21,50 @@ ufshci_ctrlr_fail(struct ufshci_controller *ctrlr) ufshci_req_queue_fail(ctrlr, &ctrlr->transfer_req_queue); } +/* Some controllers require a reinit after switching to the max gear. */ +static int +ufshci_ctrlr_reinit_after_max_gear_switch(struct ufshci_controller *ctrlr) +{ + int error; + + /* Reset device */ + ufshci_utmr_req_queue_disable(ctrlr); + ufshci_utr_req_queue_disable(ctrlr); + + error = ufshci_ctrlr_disable(ctrlr); + if (error != 0) + return (error); + + error = ufshci_ctrlr_enable(ctrlr); + if (error != 0) + return (error); + + error = ufshci_utmr_req_queue_enable(ctrlr); + if (error != 0) + return (error); + + error = ufshci_utr_req_queue_enable(ctrlr); + if (error != 0) + return (error); + + error = ufshci_ctrlr_send_nop(ctrlr); + if (error != 0) + return (error); + + /* Reinit the target device. */ + error = ufshci_dev_init(ctrlr); + if (error != 0) + return (error); + + /* Initialize Reference Clock */ + error = ufshci_dev_init_reference_clock(ctrlr); + if (error != 0) + return (error); + + /* Initialize unipro */ + return (ufshci_dev_init_unipro(ctrlr)); +} + static void ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting) { @@ -77,6 +121,12 @@ ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting) ufshci_dev_init_uic_link_state(ctrlr); + if ((ctrlr->quirks & UFSHCI_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) && + ufshci_ctrlr_reinit_after_max_gear_switch(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr); + return; + } + /* Read Controller Descriptor (Device, Geometry) */ if (ufshci_dev_get_descriptor(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr); @@ -199,7 +249,7 @@ ufshci_ctrlr_disable(struct ufshci_controller *ctrlr) return (error); } -static int +int ufshci_ctrlr_enable(struct ufshci_controller *ctrlr) { uint32_t ie, hcs; @@ -302,15 +352,18 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) /* Read Device Capabilities 
*/ ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap); - ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap); - /* - * TODO: This driver does not yet support multi-queue. - * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if - * multi-queue support is available. - */ - ctrlr->is_mcq_supported = false; - if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported)) + if (ctrlr->quirks & UFSHCI_QUIRK_BROKEN_LSDBS_MCQS_CAP) { + ctrlr->is_single_db_supported = true; + ctrlr->is_mcq_supported = true; + } else { + ctrlr->is_single_db_supported = (UFSHCIV(UFSHCI_CAP_REG_LSDBS, + cap) == 0); + ctrlr->is_mcq_supported = (UFSHCIV(UFSHCI_CAP_REG_MCQS, cap) == + 1); + } + if (!(ctrlr->is_single_db_supported || ctrlr->is_mcq_supported)) return (ENXIO); + /* * The maximum transfer size supported by UFSHCI spec is 65535 * 256 KiB * However, we limit the maximum transfer size to 1MiB(256 * 4KiB) for diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c index 3167945b53b6..0fedbca9a90e 100644 --- a/sys/dev/ufshci/ufshci_dev.c +++ b/sys/dev/ufshci/ufshci_dev.c @@ -325,7 +325,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) */ const uint32_t fast_mode = 1; const uint32_t rx_bit_shift = 4; - uint32_t power_mode, peer_granularity; + uint32_t peer_granularity; /* Update lanes with available TX/RX lanes */ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes, @@ -352,9 +352,11 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) { /* Before changing gears, first change the number of lanes. */ - if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode)) + if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, + &ctrlr->tx_rx_power_mode)) return (ENXIO); - if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, + ctrlr->tx_rx_power_mode)) return (ENXIO); /* Wait for power mode changed. 
*/ @@ -415,8 +417,8 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) return (ENXIO); /* Set TX/RX PWRMode */ - power_mode = (fast_mode << rx_bit_shift) | fast_mode; - if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) + ctrlr->tx_rx_power_mode = (fast_mode << rx_bit_shift) | fast_mode; + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, ctrlr->tx_rx_power_mode)) return (ENXIO); /* Wait for power mode changed. */ diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c index 606f2a095576..b6b8124bc3a6 100644 --- a/sys/dev/ufshci/ufshci_pci.c +++ b/sys/dev/ufshci/ufshci_pci.c @@ -34,8 +34,7 @@ static device_method_t ufshci_pci_methods[] = { DEVMETHOD(device_attach, ufshci_pci_attach), DEVMETHOD(device_detach, ufshci_pci_detach), DEVMETHOD(device_suspend, ufshci_pci_suspend), - DEVMETHOD(device_resume, ufshci_pci_resume), - DEVMETHOD_END + DEVMETHOD(device_resume, ufshci_pci_resume), DEVMETHOD_END }; static driver_t ufshci_pci_driver = { diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h index 1634cf51c9fb..067b51a419e8 100644 --- a/sys/dev/ufshci/ufshci_private.h +++ b/sys/dev/ufshci/ufshci_private.h @@ -315,10 +315,15 @@ struct ufshci_controller { #define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \ 16 /* QEMU does not support Task Management Request */ #define UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS \ - 32 /* QEMU does not support Well known logical units*/ + 32 /* QEMU does not support Well known logical units */ #define UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE \ 64 /* Some controllers have the Auto hibernate feature enabled but it \ does not work. */ +#define UFSHCI_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH \ + 128 /* Some controllers need to reinit the device after gear switch. \ + */ +#define UFSHCI_QUIRK_BROKEN_LSDBS_MCQS_CAP \ + 256 /* Some controllers have their LSDB and MCQS fields reset to 0. 
*/ uint32_t ref_clk; @@ -391,12 +396,13 @@ struct ufshci_controller { /* UFS Transport Protocol Layer (UTP) */ struct ufshci_req_queue task_mgmt_req_queue; struct ufshci_req_queue transfer_req_queue; - bool is_single_db_supported; /* 0 = supported */ - bool is_mcq_supported; /* 1 = supported */ + bool is_single_db_supported; + bool is_mcq_supported; /* UFS Interconnect Layer (UIC) */ struct mtx uic_cmd_lock; - uint8_t hs_gear; + uint32_t tx_rx_power_mode; + uint32_t hs_gear; uint32_t tx_lanes; uint32_t rx_lanes; uint32_t max_rx_hs_gear; @@ -442,6 +448,7 @@ int ufshci_ctrlr_suspend(struct ufshci_controller *ctrlr, int ufshci_ctrlr_resume(struct ufshci_controller *ctrlr, enum power_stype stype); int ufshci_ctrlr_disable(struct ufshci_controller *ctrlr); +int ufshci_ctrlr_enable(struct ufshci_controller *ctrlr); /* ctrlr defined as void * to allow use with config_intrhook. */ void ufshci_ctrlr_start_config_hook(void *arg); void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr); diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c index ca47aa159c5b..54542f48b32c 100644 --- a/sys/dev/ufshci/ufshci_req_sdb.c +++ b/sys/dev/ufshci/ufshci_req_sdb.c @@ -374,34 +374,63 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr, struct ufshci_req_queue *req_queue) { struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + int error = 0; + + mtx_lock(&hwq->recovery_lock); + mtx_lock(&hwq->qlock); if (req_queue->is_task_mgmt) { uint32_t hcs, utmrldbr, utmrlrsr; + uint32_t utmrlba, utmrlbau; + + /* + * Some controllers require re-enabling. When a controller is + * re-enabled, the utmrlba registers are initialized, and these + * must be reconfigured upon re-enabling. 
+ */ + utmrlba = hwq->req_queue_addr & 0xffffffff; + utmrlbau = hwq->req_queue_addr >> 32; + ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba); + ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau); hcs = ufshci_mmio_read_4(ctrlr, hcs); if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) { ufshci_printf(ctrlr, "UTP task management request list is not ready\n"); - return (ENXIO); + error = ENXIO; + goto out; } utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr); if (utmrldbr != 0) { ufshci_printf(ctrlr, "UTP task management request list door bell is not ready\n"); - return (ENXIO); + error = ENXIO; + goto out; } utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR); ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr); } else { uint32_t hcs, utrldbr, utrlcnr, utrlrsr; + uint32_t utrlba, utrlbau; + + /* + * Some controllers require re-enabling. When a controller is + * re-enabled, the utrlba registers are initialized, and these + * must be reconfigured upon re-enabling. + */ + utrlba = hwq->req_queue_addr & 0xffffffff; + utrlbau = hwq->req_queue_addr >> 32; + ufshci_mmio_write_4(ctrlr, utrlba, utrlba); + ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau); hcs = ufshci_mmio_read_4(ctrlr, hcs); if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) { ufshci_printf(ctrlr, "UTP transfer request list is not ready\n"); - return (ENXIO); + error = ENXIO; + goto out; } utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr); @@ -434,7 +463,10 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr, hwq->recovery_state = RECOVERY_NONE; - return (0); +out: + mtx_unlock(&hwq->qlock); + mtx_unlock(&hwq->recovery_lock); + return (error); } int diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c index 495f087f3c50..a113e951798e 100644 --- a/sys/dev/ufshci/ufshci_sysctl.c +++ b/sys/dev/ufshci/ufshci_sysctl.c @@ -193,7 +193,7 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) CTLFLAG_RD, &ctrlr->num_io_queues, 0, "Number of I/O queue pairs"); SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", 
CTLFLAG_RD, - &ctrlr->cap, 0, "Number of I/O queue pairs"); + &ctrlr->cap, 0, "Host controller capabilities register value"); SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled", CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable"); @@ -214,10 +214,6 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) &dev->wb_user_space_config_option, 0, "WriteBooster preserve user space mode"); - SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode_supported", - CTLFLAG_RD, &dev->power_mode_supported, 0, - "Device power mode support"); - SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "auto_hibernation_supported", CTLFLAG_RD, &dev->auto_hibernation_supported, 0, @@ -229,9 +225,38 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) ufshci_sysctl_ahit, "IU", "Auto-Hibernate Idle Timer Value (in microseconds)"); + SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode_supported", + CTLFLAG_RD, &dev->power_mode_supported, 0, + "Device power mode support"); + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode", CTLFLAG_RD, &dev->power_mode, 0, "Current device power mode"); + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "tx_rx_power_mode", + CTLFLAG_RD, &ctrlr->tx_rx_power_mode, 0, + "Current TX/RX PA_PWRMode value"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "max_tx_lanes", + CTLFLAG_RD, &ctrlr->max_tx_lanes, 0, + "Maximum available TX data lanes"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "max_rx_lanes", + CTLFLAG_RD, &ctrlr->max_rx_lanes, 0, + "Maximum available RX data lanes"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "tx_lanes", CTLFLAG_RD, + &ctrlr->tx_lanes, 0, "Active TX data lanes"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "rx_lanes", CTLFLAG_RD, + &ctrlr->rx_lanes, 0, "Active RX data lanes"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "max_rx_hs_gear", + CTLFLAG_RD, &ctrlr->max_rx_hs_gear, 0, + "Maximum available RX HS gear"); + + 
SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "hs_gear", CTLFLAG_RD, + &ctrlr->hs_gear, 0, "Active HS gear"); + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period, 0, ufshci_sysctl_timeout_period, "IU", diff --git a/sys/dev/usb/input/ukbd.c b/sys/dev/usb/input/ukbd.c index 57e9beac34b6..7a33a9ad2efe 100644 --- a/sys/dev/usb/input/ukbd.c +++ b/sys/dev/usb/input/ukbd.c @@ -95,18 +95,23 @@ #ifdef USB_DEBUG static int ukbd_debug = 0; +#endif static int ukbd_no_leds = 0; static int ukbd_pollrate = 0; +static int ukbd_apple_fn_mode = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ukbd, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB keyboard"); +#ifdef USB_DEBUG SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, debug, CTLFLAG_RWTUN, &ukbd_debug, 0, "Debug level"); +#endif SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, no_leds, CTLFLAG_RWTUN, &ukbd_no_leds, 0, "Disables setting of keyboard leds"); SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, pollrate, CTLFLAG_RWTUN, &ukbd_pollrate, 0, "Force this polling rate, 1-1000Hz"); -#endif +SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, apple_fn_mode, CTLFLAG_RWTUN, + &ukbd_apple_fn_mode, 0, "0 = Fn + F1..12 -> media, 1 = F1..F12 -> media"); #define UKBD_EMULATE_ATSCANCODE 1 #define UKBD_DRIVER_NAME "ukbd" @@ -123,6 +128,10 @@ SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, pollrate, CTLFLAG_RWTUN, #define MOD_EJECT 0x01 #define MOD_FN 0x02 +/* check evdev_usb_scancodes[] for names */ +#define APPLE_FN_KEY 0xff +#define APPLE_EJECT_KEY 0xec + struct ukbd_data { uint64_t bitmap[howmany(UKBD_NKEYCODE, 64)]; }; @@ -198,6 +207,7 @@ struct ukbd_softc { uint16_t sc_inputs; uint16_t sc_inputhead; uint16_t sc_inputtail; + uint16_t sc_vendor_id; uint8_t sc_leds; /* store for async led requests */ uint8_t sc_iface_index; @@ -282,9 +292,9 @@ static const uint8_t ukbd_trtab[256] = { NN, NN, NN, NN, NN, NN, NN, NN, /* D0 - D7 */ NN, NN, NN, NN, NN, NN, NN, NN, /* D8 - DF */ 29, 42, 56, 105, 90, 54, 93, 106, /* E0 - E7 */ - NN, NN, NN, 
NN, NN, NN, NN, NN, /* E8 - EF */ + NN, NN, NN, NN, 254, NN, NN, NN, /* E8 - EF */ NN, NN, NN, NN, NN, NN, NN, NN, /* F0 - F7 */ - NN, NN, NN, NN, NN, NN, NN, NN, /* F8 - FF */ + NN, NN, NN, NN, NN, NN, NN, 255, /* F8 - FF */ }; static const uint8_t ukbd_boot_desc[] = { @@ -582,14 +592,14 @@ ukbd_interrupt(struct ukbd_softc *sc) sc->sc_repeat_key = 0; } else { ukbd_put_key(sc, key | KEY_PRESS); - - sc->sc_co_basetime = sbinuptime(); - sc->sc_delay = sc->sc_kbd.kb_delay1; - ukbd_start_timer(sc); - - /* set repeat time for last key */ - sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1; - sc->sc_repeat_key = key; + if (key != APPLE_FN_KEY) { + sc->sc_co_basetime = sbinuptime(); + sc->sc_delay = sc->sc_kbd.kb_delay1; + ukbd_start_timer(sc); + /* set repeat time for last key */ + sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1; + sc->sc_repeat_key = key; + } } } } @@ -669,6 +679,16 @@ static uint32_t ukbd_apple_fn(uint32_t keycode) { switch (keycode) { + case 0x0b: return 0x50; /* H -> LEFT ARROW */ + case 0x0d: return 0x51; /* J -> DOWN ARROW */ + case 0x0e: return 0x52; /* K -> UP ARROW */ + case 0x0f: return 0x4f; /* L -> RIGHT ARROW */ + case 0x36: return 0x4a; /* COMMA -> HOME */ + case 0x37: return 0x4d; /* DOT -> END */ + case 0x18: return 0x4b; /* U -> PGUP */ + case 0x07: return 0x4e; /* D -> PGDN */ + case 0x16: return 0x47; /* S -> SCROLLLOCK */ + case 0x13: return 0x46; /* P -> SYSRQ/PRTSC */ case 0x28: return 0x49; /* RETURN -> INSERT */ case 0x2a: return 0x4c; /* BACKSPACE -> DEL */ case 0x50: return 0x4a; /* LEFT ARROW -> HOME */ @@ -679,6 +699,27 @@ ukbd_apple_fn(uint32_t keycode) } } +/* separate so the sysctl doesn't butcher non-fn keys */ +static uint32_t +ukbd_apple_fn_media(uint32_t keycode) +{ + switch (keycode) { + case 0x3a: return 0xc0; /* F1 -> BRIGHTNESS DOWN */ + case 0x3b: return 0xc1; /* F2 -> BRIGHTNESS UP */ + case 0x3c: return 0xc2; /* F3 -> SCALE (MISSION CTRL)*/ + case 0x3d: return 0xc3; /* F4 -> DASHBOARD (LAUNCHPAD) */ + case 0x3e: 
return 0xc4; /* F5 -> KBD BACKLIGHT DOWN */ + case 0x3f: return 0xc5; /* F6 -> KBD BACKLIGHT UP */ + case 0x40: return 0xea; /* F7 -> MEDIA PREV */ + case 0x41: return 0xe8; /* F8 -> PLAY/PAUSE */ + case 0x42: return 0xeb; /* F9 -> MEDIA NEXT */ + case 0x43: return 0xef; /* F10 -> MUTE */ + case 0x44: return 0xee; /* F11 -> VOLUME DOWN */ + case 0x45: return 0xed; /* F12 -> VOLUME UP */ + default: return keycode; + } +} + static uint32_t ukbd_apple_swap(uint32_t keycode) { @@ -740,18 +781,34 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error) /* clear modifiers */ modifiers = 0; - /* scan through HID data */ + /* scan through HID data and expose magic apple keys */ if ((sc->sc_flags & UKBD_FLAG_APPLE_EJECT) && (id == sc->sc_id_apple_eject)) { - if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_eject)) + if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_eject)) { + sc->sc_ndata.bitmap[APPLE_EJECT_KEY / 64] |= + 1ULL << (APPLE_EJECT_KEY % 64); modifiers |= MOD_EJECT; + } else { + sc->sc_ndata.bitmap[APPLE_EJECT_KEY / 64] &= + ~(1ULL << (APPLE_EJECT_KEY % 64)); + } } if ((sc->sc_flags & UKBD_FLAG_APPLE_FN) && (id == sc->sc_id_apple_fn)) { - if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_fn)) + if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_fn)) { + sc->sc_ndata.bitmap[APPLE_FN_KEY / 64] |= + 1ULL << (APPLE_FN_KEY % 64); modifiers |= MOD_FN; + } else { + sc->sc_ndata.bitmap[APPLE_FN_KEY / 64] &= + ~(1ULL << (APPLE_FN_KEY % 64)); + } } + int apply_apple_fn_media = (modifiers & MOD_FN) ? 
1 : 0; + if (ukbd_apple_fn_mode) /* toggle from sysctl value */ + apply_apple_fn_media = !apply_apple_fn_media; + for (i = 0; i != UKBD_NKEYCODE; i++) { const uint64_t valid = sc->sc_loc_key_valid[i / 64]; const uint64_t mask = 1ULL << (i % 64); @@ -780,6 +837,8 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error) } if (modifiers & MOD_FN) key = ukbd_apple_fn(key); + if (apply_apple_fn_media) + key = ukbd_apple_fn_media(key); if (sc->sc_flags & UKBD_FLAG_APPLE_SWAP) key = ukbd_apple_swap(key); if (key == KEY_NONE || key >= UKBD_NKEYCODE) @@ -792,6 +851,8 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error) if (modifiers & MOD_FN) key = ukbd_apple_fn(key); + if (apply_apple_fn_media) + key = ukbd_apple_fn_media(key); if (sc->sc_flags & UKBD_FLAG_APPLE_SWAP) key = ukbd_apple_swap(key); if (key == KEY_NONE || key == KEY_ERROR || key >= UKBD_NKEYCODE) @@ -1045,21 +1106,37 @@ ukbd_parse_hid(struct ukbd_softc *sc, const uint8_t *ptr, uint32_t len) hid_input, &sc->sc_kbd_id); /* investigate if this is an Apple Keyboard */ - if (hid_locate(ptr, len, - HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT), - hid_input, 0, &sc->sc_loc_apple_eject, &flags, - &sc->sc_id_apple_eject)) { - if (flags & HIO_VARIABLE) - sc->sc_flags |= UKBD_FLAG_APPLE_EJECT; - DPRINTFN(1, "Found Apple eject-key\n"); - } - if (hid_locate(ptr, len, - HID_USAGE2(0xFFFF, 0x0003), - hid_input, 0, &sc->sc_loc_apple_fn, &flags, - &sc->sc_id_apple_fn)) { - if (flags & HIO_VARIABLE) - sc->sc_flags |= UKBD_FLAG_APPLE_FN; - DPRINTFN(1, "Found Apple FN-key\n"); + if (sc->sc_vendor_id == USB_VENDOR_APPLE) { + if (hid_locate(ptr, len, + HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT), + hid_input, 0, &sc->sc_loc_apple_eject, &flags, + &sc->sc_id_apple_eject)) { + if (flags & HIO_VARIABLE) + sc->sc_flags |= UKBD_FLAG_APPLE_EJECT; + DPRINTFN(1, "Found Apple eject-key\n"); + } + /* + * check the same vendor pages that linux does to find the one + * apple uses for the function key. 
+ */ + static const uint16_t apple_pages[] = { + HUP_APPLE, /* HID_UP_CUSTOM in linux */ + HUP_MICROSOFT, /* HID_UP_MSVENDOR in linux */ + HUP_HP, /* HID_UP_HPVENDOR2 in linux */ + 0xFFFF /* Original FreeBSD check (Remove?) */ + }; + for (int i = 0; i < (int)nitems(apple_pages); i++) { + if (hid_locate(ptr, len, + HID_USAGE2(apple_pages[i], 0x0003), + hid_input, 0, &sc->sc_loc_apple_fn, &flags, + &sc->sc_id_apple_fn)) { + if (flags & HIO_VARIABLE) + sc->sc_flags |= UKBD_FLAG_APPLE_FN; + DPRINTFN(1, "Found Apple FN-key on page 0x%04x\n", + apple_pages[i]); + break; + } + } } /* figure out event buffer */ @@ -1147,6 +1224,7 @@ ukbd_attach(device_t dev) sc->sc_udev = uaa->device; sc->sc_iface = uaa->iface; + sc->sc_vendor_id = uaa->info.idVendor; sc->sc_iface_index = uaa->info.bIfaceIndex; sc->sc_iface_no = uaa->info.bIfaceNum; sc->sc_mode = K_XLATE; diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs index bb039f59ce19..b0934cd63a92 100644 --- a/sys/dev/usb/usbdevs +++ b/sys/dev/usb/usbdevs @@ -786,6 +786,7 @@ vendor PERASO 0x2932 Peraso Technologies, Inc. vendor PLANEX 0x2c02 Planex Communications vendor MERCUSYS 0x2c4e Mercusys, Inc. vendor QUECTEL 0x2c7c Quectel Wireless Solutions +vendor NUAND 0x2cf0 Nuand LLC vendor VIDZMEDIA 0x3275 VidzMedia Pte Ltd vendor LINKINSTRUMENTS 0x3195 Link Instruments Inc. 
vendor AEI 0x3334 AEI @@ -1695,6 +1696,7 @@ product CYBERTAN RT2870 0x1828 RT2870 /* Cypress Semiconductor products */ product CYPRESS MOUSE 0x0001 mouse product CYPRESS THERMO 0x0002 thermometer +product CYPRESS FX3 0x00f3 EZ-USB FX3 product CYPRESS WISPY1A 0x0bad MetaGeek Wi-Spy product CYPRESS KBDHUB 0x0101 Keyboard/Hub product CYPRESS FMRADIO 0x1002 FM Radio @@ -3556,6 +3558,11 @@ product NIKON E990 0x0102 Digital Camera E990 product NIKON LS40 0x4000 CoolScan LS40 ED product NIKON D300 0x041a Digital Camera D300 +/* Nuand LLC products */ +product NUAND BLADERF 0x5246 bladeRF Software Defined Radio +product NUAND BLADERF_BL 0x5247 bladeRF Bootloader +product NUAND BLADERF2 0x5250 bladeRF 2.0 Software Defined Radio + /* NovaTech Products */ product NOVATECH NV902 0x9020 NovaTech NV-902W product NOVATECH RT2573 0x9021 RT2573 diff --git a/sys/dev/vmgenc/vmgenc_acpi.c b/sys/dev/vmgenc/vmgenc_acpi.c index 18519a8e4f22..59fcbd5346ba 100644 --- a/sys/dev/vmgenc/vmgenc_acpi.c +++ b/sys/dev/vmgenc/vmgenc_acpi.c @@ -261,4 +261,4 @@ static driver_t vmgenc_driver = { DRIVER_MODULE(vmgenc, acpi, vmgenc_driver, NULL, NULL); MODULE_DEPEND(vmgenc, acpi, 1, 1, 1); -MODULE_DEPEND(vemgenc, random_harvestq, 1, 1, 1); +MODULE_DEPEND(vmgenc, random_harvestq, 1, 1, 1); diff --git a/sys/fs/cd9660/cd9660_rrip.c b/sys/fs/cd9660/cd9660_rrip.c index d0b0008d10b2..6e91f1abb9d2 100644 --- a/sys/fs/cd9660/cd9660_rrip.c +++ b/sys/fs/cd9660/cd9660_rrip.c @@ -88,7 +88,7 @@ cd9660_rrip_attr(ISO_RRIP_ATTR *p, ISO_RRIP_ANALYZE *ana) ana->inop->inode.iso_gid = isonum_733(p->gid); ana->inop->inode.iso_links = isonum_733(p->links); ana->fields &= ~ISO_SUSP_ATTR; - return ISO_SUSP_ATTR; + return (ISO_SUSP_ATTR); } static void @@ -96,7 +96,7 @@ cd9660_rrip_defattr(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana) { /* But this is a required field! 
*/ printf("RRIP without PX field?\n"); - cd9660_defattr(isodir,ana->inop,NULL,ISO_FTYPE_RRIP); + cd9660_defattr(isodir, ana->inop, NULL, ISO_FTYPE_RRIP); } /* @@ -188,7 +188,7 @@ cd9660_rrip_slink(ISO_RRIP_SLINK *p, ISO_RRIP_ANALYZE *ana) ana->fields = 0; ana->outbuf -= *ana->outlen; *ana->outlen = 0; - return 0; + return (0); } memcpy(outbuf, inbuf, wlen); @@ -201,9 +201,9 @@ cd9660_rrip_slink(ISO_RRIP_SLINK *p, ISO_RRIP_ANALYZE *ana) if (!isonum_711(p->flags)) { ana->fields &= ~ISO_SUSP_SLINK; - return ISO_SUSP_SLINK; + return (ISO_SUSP_SLINK); } - return 0; + return (0); } /* @@ -259,7 +259,7 @@ cd9660_rrip_altname(ISO_RRIP_ALTNAME *p, ISO_RRIP_ANALYZE *ana) ana->fields &= ~ISO_SUSP_ALTNAME; ana->outbuf -= *ana->outlen - wlen; *ana->outlen = 0; - return 0; + return (0); } memcpy(ana->outbuf, inbuf, wlen); @@ -267,18 +267,17 @@ cd9660_rrip_altname(ISO_RRIP_ALTNAME *p, ISO_RRIP_ANALYZE *ana) if (!cont) { ana->fields &= ~ISO_SUSP_ALTNAME; - return ISO_SUSP_ALTNAME; + return (ISO_SUSP_ALTNAME); } - return 0; + return (0); } static void cd9660_rrip_defname(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana) { - isofntrans(isodir->name,isonum_711(isodir->name_len), - ana->outbuf,ana->outlen, - 1,isonum_711(isodir->flags)&4, ana->imp->joliet_level, - ana->imp->im_flags, ana->imp->im_d2l); + isofntrans(isodir->name, isonum_711(isodir->name_len), + ana->outbuf, ana->outlen, 1, isonum_711(isodir->flags)&4, + ana->imp->joliet_level, ana->imp->im_flags, ana->imp->im_d2l); switch (*ana->outbuf) { default: break; @@ -287,7 +286,7 @@ cd9660_rrip_defname(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana) /* FALLTHROUGH */ case 0: /* outlen is 1 already */ - strcpy(ana->outbuf,".."); + strcpy(ana->outbuf, ".."); break; } } @@ -299,8 +298,8 @@ static int cd9660_rrip_pclink(ISO_RRIP_CLINK *p, ISO_RRIP_ANALYZE *ana) { *ana->inump = isonum_733(p->dir_loc) << ana->imp->im_bshift; - ana->fields &= ~(ISO_SUSP_CLINK|ISO_SUSP_PLINK); - return *p->h.type == 'C' ? 
ISO_SUSP_CLINK : ISO_SUSP_PLINK; + ana->fields &= ~(ISO_SUSP_CLINK | ISO_SUSP_PLINK); + return (*p->h.type == 'C' ? ISO_SUSP_CLINK : ISO_SUSP_PLINK); } /* @@ -312,7 +311,7 @@ cd9660_rrip_reldir(ISO_RRIP_RELDIR *p, ISO_RRIP_ANALYZE *ana) /* special hack to make caller aware of RE field */ *ana->outlen = 0; ana->fields = 0; - return ISO_SUSP_RELDIR|ISO_SUSP_ALTNAME|ISO_SUSP_CLINK|ISO_SUSP_PLINK; + return (ISO_SUSP_RELDIR | ISO_SUSP_ALTNAME | ISO_SUSP_CLINK | ISO_SUSP_PLINK); } static int @@ -323,60 +322,61 @@ cd9660_rrip_tstamp(ISO_RRIP_TSTAMP *p, ISO_RRIP_ANALYZE *ana) ptime = p->time; /* Check a format of time stamp (7bytes/17bytes) */ - if (!(*p->flags&ISO_SUSP_TSTAMP_FORM17)) { - if (*p->flags&ISO_SUSP_TSTAMP_CREAT) + if (!(*p->flags & ISO_SUSP_TSTAMP_FORM17)) { + if (*p->flags & ISO_SUSP_TSTAMP_CREAT) { ptime += 7; - - if (*p->flags&ISO_SUSP_TSTAMP_MODIFY) { - cd9660_tstamp_conv7(ptime,&ana->inop->inode.iso_mtime, - ISO_FTYPE_RRIP); + } + if (*p->flags & ISO_SUSP_TSTAMP_MODIFY) { + cd9660_tstamp_conv7(ptime, &ana->inop->inode.iso_mtime, + ISO_FTYPE_RRIP); ptime += 7; - } else + } else { memset(&ana->inop->inode.iso_mtime, 0, sizeof(struct timespec)); - - if (*p->flags&ISO_SUSP_TSTAMP_ACCESS) { - cd9660_tstamp_conv7(ptime,&ana->inop->inode.iso_atime, - ISO_FTYPE_RRIP); + } + if (*p->flags & ISO_SUSP_TSTAMP_ACCESS) { + cd9660_tstamp_conv7(ptime, &ana->inop->inode.iso_atime, + ISO_FTYPE_RRIP); ptime += 7; - } else + } else { ana->inop->inode.iso_atime = ana->inop->inode.iso_mtime; - - if (*p->flags&ISO_SUSP_TSTAMP_ATTR) - cd9660_tstamp_conv7(ptime,&ana->inop->inode.iso_ctime, - ISO_FTYPE_RRIP); - else + } + if (*p->flags & ISO_SUSP_TSTAMP_ATTR) { + cd9660_tstamp_conv7(ptime, &ana->inop->inode.iso_ctime, + ISO_FTYPE_RRIP); + } else { ana->inop->inode.iso_ctime = ana->inop->inode.iso_mtime; - + } } else { - if (*p->flags&ISO_SUSP_TSTAMP_CREAT) + if (*p->flags & ISO_SUSP_TSTAMP_CREAT) { ptime += 17; - - if (*p->flags&ISO_SUSP_TSTAMP_MODIFY) { - 
cd9660_tstamp_conv17(ptime,&ana->inop->inode.iso_mtime); + } + if (*p->flags & ISO_SUSP_TSTAMP_MODIFY) { + cd9660_tstamp_conv17(ptime, &ana->inop->inode.iso_mtime); ptime += 17; - } else + } else { memset(&ana->inop->inode.iso_mtime, 0, sizeof(struct timespec)); - - if (*p->flags&ISO_SUSP_TSTAMP_ACCESS) { - cd9660_tstamp_conv17(ptime,&ana->inop->inode.iso_atime); + } + if (*p->flags & ISO_SUSP_TSTAMP_ACCESS) { + cd9660_tstamp_conv17(ptime, &ana->inop->inode.iso_atime); ptime += 17; - } else + } else { ana->inop->inode.iso_atime = ana->inop->inode.iso_mtime; - - if (*p->flags&ISO_SUSP_TSTAMP_ATTR) - cd9660_tstamp_conv17(ptime,&ana->inop->inode.iso_ctime); - else + } + if (*p->flags & ISO_SUSP_TSTAMP_ATTR) { + cd9660_tstamp_conv17(ptime, &ana->inop->inode.iso_ctime); + } else { ana->inop->inode.iso_ctime = ana->inop->inode.iso_mtime; + } } ana->fields &= ~ISO_SUSP_TSTAMP; - return ISO_SUSP_TSTAMP; + return (ISO_SUSP_TSTAMP); } static void cd9660_rrip_deftstamp(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana) { - cd9660_deftstamp(isodir,ana->inop,NULL,ISO_FTYPE_RRIP); + cd9660_deftstamp(isodir, ana->inop, NULL, ISO_FTYPE_RRIP); } /* @@ -395,7 +395,7 @@ cd9660_rrip_device(ISO_RRIP_DEVICE *p, ISO_RRIP_ANALYZE *ana) else ana->inop->inode.iso_rdev = makedev(high, minor(low)); ana->fields &= ~ISO_SUSP_DEVICE; - return ISO_SUSP_DEVICE; + return (ISO_SUSP_DEVICE); } /* @@ -404,12 +404,12 @@ cd9660_rrip_device(ISO_RRIP_DEVICE *p, ISO_RRIP_ANALYZE *ana) static int cd9660_rrip_idflag(ISO_RRIP_IDFLAG *p, ISO_RRIP_ANALYZE *ana) { - ana->fields &= isonum_711(p->flags)|~0xff; /* don't touch high bits */ + ana->fields &= isonum_711(p->flags) | ~0xff; /* don't touch high bits */ /* special handling of RE field */ - if (ana->fields&ISO_SUSP_RELDIR) - return cd9660_rrip_reldir(/* XXX */ (ISO_RRIP_RELDIR *)p,ana); + if (ana->fields & ISO_SUSP_RELDIR) + return (cd9660_rrip_reldir(/* XXX */ (ISO_RRIP_RELDIR *)p, ana)); - return ISO_SUSP_IDFLAG; + return (ISO_SUSP_IDFLAG); } /* 
@@ -421,7 +421,7 @@ cd9660_rrip_cont(ISO_RRIP_CONT *p, ISO_RRIP_ANALYZE *ana) ana->iso_ce_blk = isonum_733(p->location); ana->iso_ce_off = isonum_733(p->offset); ana->iso_ce_len = isonum_733(p->length); - return ISO_SUSP_CONT; + return (ISO_SUSP_CONT); } /* @@ -430,7 +430,7 @@ cd9660_rrip_cont(ISO_RRIP_CONT *p, ISO_RRIP_ANALYZE *ana) static int cd9660_rrip_stop(ISO_SUSP_HEADER *p, ISO_RRIP_ANALYZE *ana) { - return ISO_SUSP_STOP; + return (ISO_SUSP_STOP); } /* @@ -439,16 +439,18 @@ cd9660_rrip_stop(ISO_SUSP_HEADER *p, ISO_RRIP_ANALYZE *ana) static int cd9660_rrip_extref(ISO_RRIP_EXTREF *p, ISO_RRIP_ANALYZE *ana) { - if ( ! ((isonum_711(p->len_id) == 10 - && bcmp((char *)p + 8,"RRIP_1991A",10) == 0) - || (isonum_711(p->len_id) == 10 - && bcmp((char *)p + 8,"IEEE_P1282",10) == 0) - || (isonum_711(p->len_id) == 9 - && bcmp((char *)p + 8,"IEEE_1282", 9) == 0)) - || isonum_711(p->version) != 1) - return 0; - ana->fields &= ~ISO_SUSP_EXTREF; - return ISO_SUSP_EXTREF; + size_t len = isonum_711(p->len_id); + char *data = (char *)(p + 1); + + if ((len == 10 && memcmp(data, "RRIP_1991A", len) == 0) || + (len == 10 && memcmp(data, "IEEE_P1282", len) == 0) || + (len == 9 && memcmp(data, "IEEE_1282", len) == 0)) { + if (isonum_711(p->version) == 1) { + ana->fields &= ~ISO_SUSP_EXTREF; + return (ISO_SUSP_EXTREF); + } + } + return (0); } static int @@ -471,7 +473,7 @@ cd9660_rrip_loop(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana, if (!(isonum_711(isodir->name_len)&1)) pwhead++; isochar(isodir->name, pwhead, ana->imp->joliet_level, &c, NULL, - ana->imp->im_flags, ana->imp->im_d2l); + ana->imp->im_flags, ana->imp->im_d2l); /* If it's not the '.' 
entry of the root dir obey SP field */ if (c != 0 || isonum_733(isodir->extent) != ana->imp->root_extent) @@ -491,16 +493,16 @@ cd9660_rrip_loop(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana, while (pend >= phead + 1) { if (isonum_711(phead->version) == 1) { for (ptable = table; ptable->func; ptable++) { - if (*phead->type == *ptable->type - && phead->type[1] == ptable->type[1]) { - result |= ptable->func(phead,ana); + if (phead->type[0] == ptable->type[0] && + phead->type[1] == ptable->type[1]) { + result |= ptable->func(phead, ana); break; } } if (!ana->fields) break; } - if (result&ISO_SUSP_STOP) { + if (result & ISO_SUSP_STOP) { result &= ~ISO_SUSP_STOP; break; } @@ -530,7 +532,7 @@ cd9660_rrip_loop(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana, /* what to do now? */ break; phead = (ISO_SUSP_HEADER *)(bp->b_data + ana->iso_ce_off); - pend = (ISO_SUSP_HEADER *) ((char *)phead + ana->iso_ce_len); + pend = (ISO_SUSP_HEADER *)((char *)phead + ana->iso_ce_len); } else break; } @@ -541,10 +543,10 @@ cd9660_rrip_loop(struct iso_directory_record *isodir, ISO_RRIP_ANALYZE *ana, * (attribute/time stamp) */ for (ptable = table; ptable->func2; ptable++) - if (!(ptable->result&result)) - ptable->func2(isodir,ana); + if (!(ptable->result & result)) + ptable->func2(isodir, ana); - return result; + return (result); } /* @@ -572,9 +574,9 @@ cd9660_rrip_analyze(struct iso_directory_record *isodir, struct iso_node *inop, analyze.inop = inop; analyze.imp = imp; - analyze.fields = ISO_SUSP_ATTR|ISO_SUSP_TSTAMP|ISO_SUSP_DEVICE; + analyze.fields = ISO_SUSP_ATTR | ISO_SUSP_TSTAMP | ISO_SUSP_DEVICE; - return cd9660_rrip_loop(isodir,&analyze,rrip_table_analyze); + return (cd9660_rrip_loop(isodir, &analyze, rrip_table_analyze)); } /* @@ -582,8 +584,8 @@ cd9660_rrip_analyze(struct iso_directory_record *isodir, struct iso_node *inop, */ static RRIP_TABLE rrip_table_getname[] = { { "NM", BC cd9660_rrip_altname, cd9660_rrip_defname, ISO_SUSP_ALTNAME }, - { "CL", BC 
cd9660_rrip_pclink, 0, ISO_SUSP_CLINK|ISO_SUSP_PLINK }, - { "PL", BC cd9660_rrip_pclink, 0, ISO_SUSP_CLINK|ISO_SUSP_PLINK }, + { "CL", BC cd9660_rrip_pclink, 0, ISO_SUSP_CLINK | ISO_SUSP_PLINK }, + { "PL", BC cd9660_rrip_pclink, 0, ISO_SUSP_CLINK | ISO_SUSP_PLINK }, { "RE", BC cd9660_rrip_reldir, 0, ISO_SUSP_RELDIR }, { "RR", BC cd9660_rrip_idflag, 0, ISO_SUSP_IDFLAG }, { "CE", BC cd9660_rrip_cont, 0, ISO_SUSP_CONT }, @@ -604,20 +606,20 @@ cd9660_rrip_getname(struct iso_directory_record *isodir, char *outbuf, analyze.maxlen = NAME_MAX; analyze.inump = inump; analyze.imp = imp; - analyze.fields = ISO_SUSP_ALTNAME|ISO_SUSP_RELDIR|ISO_SUSP_CLINK|ISO_SUSP_PLINK; + analyze.fields = ISO_SUSP_ALTNAME | ISO_SUSP_RELDIR | ISO_SUSP_CLINK | ISO_SUSP_PLINK; *outlen = 0; isochar(isodir->name, isodir->name + isonum_711(isodir->name_len), - imp->joliet_level, &c, NULL, imp->im_flags, imp->im_d2l); + imp->joliet_level, &c, NULL, imp->im_flags, imp->im_d2l); tab = rrip_table_getname; if (c == 0 || c == 1) { - cd9660_rrip_defname(isodir,&analyze); + cd9660_rrip_defname(isodir, &analyze); analyze.fields &= ~ISO_SUSP_ALTNAME; tab++; } - return cd9660_rrip_loop(isodir,&analyze,tab); + return (cd9660_rrip_loop(isodir, &analyze, tab)); } /* @@ -636,6 +638,7 @@ cd9660_rrip_getsymname(struct iso_directory_record *isodir, char *outbuf, u_short *outlen, struct iso_mnt *imp) { ISO_RRIP_ANALYZE analyze; + int ret; analyze.outbuf = outbuf; analyze.outlen = outlen; @@ -645,7 +648,8 @@ cd9660_rrip_getsymname(struct iso_directory_record *isodir, char *outbuf, analyze.imp = imp; analyze.fields = ISO_SUSP_SLINK; - return (cd9660_rrip_loop(isodir,&analyze,rrip_table_getsymname)&ISO_SUSP_SLINK); + ret = cd9660_rrip_loop(isodir, &analyze, rrip_table_getsymname); + return (ret & ISO_SUSP_SLINK); } static RRIP_TABLE rrip_table_extref[] = { @@ -664,21 +668,23 @@ cd9660_rrip_offset(struct iso_directory_record *isodir, struct iso_mnt *imp) { ISO_RRIP_OFFSET *p; ISO_RRIP_ANALYZE analyze; + int ret; 
imp->rr_skip0 = 0; p = (ISO_RRIP_OFFSET *)(isodir->name + 1); - if (bcmp(p,"SP\7\1\276\357",6)) { + if (memcmp(p, "SP\7\1\276\357", 6) != 0) { /* Maybe, it's a CDROM XA disc? */ imp->rr_skip0 = 15; p = (ISO_RRIP_OFFSET *)((char *)p + 15); - if (bcmp(p,"SP\7\1\276\357",6)) - return -1; + if (memcmp(p, "SP\7\1\276\357", 6) != 0) + return (-1); } analyze.imp = imp; analyze.fields = ISO_SUSP_EXTREF; - if (!(cd9660_rrip_loop(isodir,&analyze,rrip_table_extref)&ISO_SUSP_EXTREF)) - return -1; + ret = cd9660_rrip_loop(isodir, &analyze, rrip_table_extref); + if ((ret & ISO_SUSP_EXTREF) == 0) + return (-1); - return isonum_711(p->skip); + return (isonum_711(p->skip)); } diff --git a/sys/fs/nfsserver/nfs_nfsdsocket.c b/sys/fs/nfsserver/nfs_nfsdsocket.c index 201f3b74b946..efa7906dffc7 100644 --- a/sys/fs/nfsserver/nfs_nfsdsocket.c +++ b/sys/fs/nfsserver/nfs_nfsdsocket.c @@ -1272,7 +1272,8 @@ tryagain: if (vp == NULL || savevp == NULL) { nd->nd_repstat = NFSERR_NOFILEHANDLE; break; - } else if (fsidcmp(&cur_fsid, &save_fsid) != 0) { + } else if (fsidcmp(&cur_fsid, &save_fsid) != 0 && + op != NFSV4OP_COPY && op != NFSV4OP_CLONE) { nd->nd_repstat = NFSERR_XDEV; break; } diff --git a/sys/i386/conf/GENERIC b/sys/i386/conf/GENERIC index 3de346013111..595683883686 100644 --- a/sys/i386/conf/GENERIC +++ b/sys/i386/conf/GENERIC @@ -33,7 +33,6 @@ options VIMAGE # Subsystem virtualization, e.g. VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 -options ROUTE_MPATH # Multipath routing support options TCP_HHOOK # hhook(9) framework for TCP options TCP_OFFLOAD # TCP offload options SCTP_SUPPORT # Allow kldload of SCTP diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c index e8e670d39d09..f984161bfcd6 100644 --- a/sys/kern/kern_event.c +++ b/sys/kern/kern_event.c @@ -28,7 +28,6 @@ * SUCH DAMAGE. 
*/ -#include <sys/cdefs.h> #include "opt_ktrace.h" #include "opt_kqueue.h" @@ -1803,6 +1802,19 @@ findkn: error = ENOMEM; goto done; } + + /* + * Now that the kqueue is locked, make sure the fd + * didn't change out from under us. + */ + if (fops->f_isfd && + fget_noref_unlocked(td->td_proc->p_fd, + kev->ident) != fp) { + KQ_UNLOCK(kq); + tkn = kn; + error = EBADF; + goto done; + } kn->kn_fp = fp; kn->kn_kq = kq; kn->kn_fop = fops; diff --git a/sys/kern/subr_sbuf.c b/sys/kern/subr_sbuf.c index 27e18c114afd..c5673e871df4 100644 --- a/sys/kern/subr_sbuf.c +++ b/sys/kern/subr_sbuf.c @@ -124,8 +124,8 @@ _assert_sbuf_state(const char *fun, struct sbuf *s, int state) { KASSERT((s->s_flags & SBUF_FINISHED) == state, - ("%s called with %sfinished or corrupt sbuf", fun, - (state ? "un" : ""))); + ("%s called with %sfinished or corrupt sbuf %p { s_flags %#010x }, " + "state %#010x", fun, (state ? "un" : ""), s, s->s_flags, state)); } #define assert_sbuf_integrity(s) _assert_sbuf_integrity(__func__, (s)) diff --git a/sys/modules/asmc/Makefile b/sys/modules/asmc/Makefile index 4ba45a4625d8..17f6c7eec731 100644 --- a/sys/modules/asmc/Makefile +++ b/sys/modules/asmc/Makefile @@ -1,6 +1,7 @@ .PATH: ${SRCTOP}/sys/dev/asmc KMOD= asmc -SRCS= asmc.c opt_acpi.h opt_asmc.h acpi_if.h bus_if.h device_if.h +SRCS= asmc.c opt_acpi.h opt_asmc.h +SRCS+= acpi_if.h backlight_if.h bus_if.h device_if.h .include <bsd.kmod.mk> diff --git a/sys/modules/hid/hkbd/Makefile b/sys/modules/hid/hkbd/Makefile index 42b5d69dda9e..82f6599cca1d 100644 --- a/sys/modules/hid/hkbd/Makefile +++ b/sys/modules/hid/hkbd/Makefile @@ -3,6 +3,6 @@ KMOD= hkbd SRCS= hkbd.c SRCS+= opt_hid.h opt_evdev.h opt_kbd.h opt_hkbd.h -SRCS+= bus_if.h device_if.h +SRCS+= bus_if.h device_if.h usbdevs.h .include <bsd.kmod.mk> diff --git a/sys/modules/netgraph/netflow/Makefile b/sys/modules/netgraph/netflow/Makefile index c73e9ac41680..c743db9d7eca 100644 --- a/sys/modules/netgraph/netflow/Makefile +++ 
b/sys/modules/netgraph/netflow/Makefile @@ -5,6 +5,6 @@ .PATH: ${SRCTOP}/sys/netgraph/netflow KMOD= ng_netflow -SRCS= ng_netflow.c netflow.c netflow_v9.c opt_inet.h opt_inet6.h opt_route.h +SRCS= ng_netflow.c netflow.c netflow_v9.c opt_inet.h opt_inet6.h .include <bsd.kmod.mk> diff --git a/sys/modules/netlink/Makefile b/sys/modules/netlink/Makefile index 4abef5106899..6f8205289dd4 100644 --- a/sys/modules/netlink/Makefile +++ b/sys/modules/netlink/Makefile @@ -5,7 +5,7 @@ SRCS = netlink_module.c netlink_domain.c netlink_io.c \ netlink_message_writer.c netlink_generic.c \ netlink_route.c route/iface.c route/iface_drivers.c route/neigh.c \ route/nexthop.c route/rt.c -SRCS+= opt_inet.h opt_inet6.h opt_route.h +SRCS+= opt_inet.h opt_inet6.h CFLAGS+= -DNETLINK_MODULE diff --git a/sys/modules/ufshci/Makefile b/sys/modules/ufshci/Makefile index ab5f3eaf88d0..aa0419d3a6d6 100644 --- a/sys/modules/ufshci/Makefile +++ b/sys/modules/ufshci/Makefile @@ -3,6 +3,7 @@ KMOD = ufshci SRCS = ufshci.c \ + ufshci_acpi.c \ ufshci_pci.c \ ufshci_ctrlr.c \ ufshci_dev.c \ @@ -12,8 +13,10 @@ SRCS = ufshci.c \ ufshci_req_sdb.c \ ufshci_sim.c \ ufshci_sysctl.c \ + acpi_if.h \ bus_if.h \ device_if.h \ + opt_acpi.h \ opt_cam.h \ pci_if.h diff --git a/sys/net/if_types.h b/sys/net/if_types.h index 1d17e5c09813..45923d15175b 100644 --- a/sys/net/if_types.h +++ b/sys/net/if_types.h @@ -174,7 +174,7 @@ typedef enum { IFT_A12MPPSWITCH = 0x82, /* Avalon Parallel Processor */ IFT_TUNNEL = 0x83, /* Encapsulation interface */ IFT_COFFEE = 0x84, /* coffee pot */ - IFT_CES = 0x85, /* Circiut Emulation Service */ + IFT_CES = 0x85, /* Circuit Emulation Service */ IFT_ATMSUBINTERFACE = 0x86, /* (x) ATM Sub Interface */ IFT_L2VLAN = 0x87, /* Layer 2 Virtual LAN using 802.1Q */ IFT_L3IPVLAN = 0x88, /* Layer 3 Virtual LAN - IP Protocol */ diff --git a/sys/net/route.c b/sys/net/route.c index 1a1de22804f0..9ed3a53cbfcb 100644 --- a/sys/net/route.c +++ b/sys/net/route.c @@ -36,7 +36,6 @@ #include "opt_inet.h" 
#include "opt_inet6.h" #include "opt_mrouting.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> diff --git a/sys/net/route.h b/sys/net/route.h index 8c713d65ec95..49bb7abd9726 100644 --- a/sys/net/route.h +++ b/sys/net/route.h @@ -126,30 +126,13 @@ VNET_DECLARE(u_int, fib_hash_outbound); /* Outbound flowid generation rules */ #ifdef RSS - #define fib4_calc_packet_hash xps_proto_software_hash_v4 #define fib6_calc_packet_hash xps_proto_software_hash_v6 #define CALC_FLOWID_OUTBOUND_SENDTO true - -#ifdef ROUTE_MPATH -#define CALC_FLOWID_OUTBOUND V_fib_hash_outbound -#else -#define CALC_FLOWID_OUTBOUND false -#endif - #else /* !RSS */ - #define fib4_calc_packet_hash fib4_calc_software_hash #define fib6_calc_packet_hash fib6_calc_software_hash - -#ifdef ROUTE_MPATH #define CALC_FLOWID_OUTBOUND_SENDTO V_fib_hash_outbound -#define CALC_FLOWID_OUTBOUND V_fib_hash_outbound -#else -#define CALC_FLOWID_OUTBOUND_SENDTO false -#define CALC_FLOWID_OUTBOUND false -#endif - #endif /* RSS */ diff --git a/sys/net/route/fib_algo.c b/sys/net/route/fib_algo.c index ca635ad8a7b0..2ba044a31020 100644 --- a/sys/net/route/fib_algo.c +++ b/sys/net/route/fib_algo.c @@ -1714,9 +1714,7 @@ fib_get_rtable_info(struct rib_head *rh, struct rib_rtable_info *rinfo) bzero(rinfo, sizeof(struct rib_rtable_info)); rinfo->num_prefixes = rh->rnh_prefixes; rinfo->num_nhops = nhops_get_count(rh); -#ifdef ROUTE_MPATH rinfo->num_nhgrp = nhgrp_get_count(rh); -#endif } /* @@ -1762,12 +1760,10 @@ fib_get_nhop_array(struct fib_data *fd) static uint32_t get_nhop_idx(struct nhop_object *nh) { -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(nh)) return (nhgrp_get_idx((struct nhgrp_object *)nh)); - else -#endif - return (nhop_get_idx(nh)); + + return (nhop_get_idx(nh)); } uint32_t diff --git a/sys/net/route/nhgrp.c b/sys/net/route/nhgrp.c index d048e09b045a..37f36b86c975 100644 --- a/sys/net/route/nhgrp.c +++ b/sys/net/route/nhgrp.c @@ -26,7 +26,6 @@ */ #include "opt_inet.h" -#include "opt_route.h" #include 
<sys/param.h> #include <sys/systm.h> diff --git a/sys/net/route/nhgrp_ctl.c b/sys/net/route/nhgrp_ctl.c index e26c1fcff33a..8a1fa2113a6c 100644 --- a/sys/net/route/nhgrp_ctl.c +++ b/sys/net/route/nhgrp_ctl.c @@ -25,7 +25,6 @@ * SUCH DAMAGE. */ #include "opt_inet.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> diff --git a/sys/net/route/nhop.c b/sys/net/route/nhop.c index e372c4f7fcd3..dfe9df256f2e 100644 --- a/sys/net/route/nhop.c +++ b/sys/net/route/nhop.c @@ -27,7 +27,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> @@ -115,9 +114,7 @@ destroy_ctl(struct nh_control *ctl) NHOPS_LOCK_DESTROY(ctl); free(ctl->nh_head.ptr, M_NHOP); free(ctl->nh_idx_head.idx, M_NHOP); -#ifdef ROUTE_MPATH nhgrp_ctl_free(ctl); -#endif free(ctl, M_NHOP); } @@ -160,9 +157,7 @@ nhops_destroy_rib(struct rib_head *rh) FIB_RH_LOG(LOG_DEBUG3, rh, "marking nhop %u unlinked", nh_priv->nh_idx); refcount_release(&nh_priv->nh_linked); } CHT_SLIST_FOREACH_END; -#ifdef ROUTE_MPATH nhgrp_ctl_unlink_all(ctl); -#endif NHOPS_WUNLOCK(ctl); /* diff --git a/sys/net/route/nhop_ctl.c b/sys/net/route/nhop_ctl.c index 0c028c7ae877..6c03e621ed82 100644 --- a/sys/net/route/nhop_ctl.c +++ b/sys/net/route/nhop_ctl.c @@ -28,7 +28,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> @@ -644,28 +643,21 @@ nhop_free(struct nhop_object *nh) void nhop_ref_any(struct nhop_object *nh) { -#ifdef ROUTE_MPATH + if (!NH_IS_NHGRP(nh)) nhop_ref_object(nh); else nhgrp_ref_object((struct nhgrp_object *)nh); -#else - nhop_ref_object(nh); -#endif } void nhop_free_any(struct nhop_object *nh) { -#ifdef ROUTE_MPATH if (!NH_IS_NHGRP(nh)) nhop_free(nh); else nhgrp_free((struct nhgrp_object *)nh); -#else - nhop_free(nh); -#endif } /* Nhop-related methods */ @@ -1169,12 +1161,11 @@ nhop_print_buf(const struct nhop_object *nh, char *buf, size_t bufsize) char 
* nhop_print_buf_any(const struct nhop_object *nh, char *buf, size_t bufsize) { -#ifdef ROUTE_MPATH + if (NH_IS_NHGRP(nh)) return (nhgrp_print_buf((const struct nhgrp_object *)nh, buf, bufsize)); - else -#endif - return (nhop_print_buf(nh, buf, bufsize)); + + return (nhop_print_buf(nh, buf, bufsize)); } /* diff --git a/sys/net/route/nhop_utils.c b/sys/net/route/nhop_utils.c index 0308eb331947..ee4ea969d2ac 100644 --- a/sys/net/route/nhop_utils.c +++ b/sys/net/route/nhop_utils.c @@ -27,7 +27,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> diff --git a/sys/net/route/route_ctl.c b/sys/net/route/route_ctl.c index d629fd1625c3..c6d8d43a73f4 100644 --- a/sys/net/route/route_ctl.c +++ b/sys/net/route/route_ctl.c @@ -82,11 +82,9 @@ static int change_route_byinfo(struct rib_head *rnh, struct rtentry *rt, static int add_route_flags(struct rib_head *rnh, struct rtentry *rt, struct route_nhop_data *rnd_add, int op_flags, struct rib_cmd_info *rc); -#ifdef ROUTE_MPATH static int add_route_flags_mpath(struct rib_head *rnh, struct rtentry *rt, struct route_nhop_data *rnd_add, struct route_nhop_data *rnd_orig, int op_flags, struct rib_cmd_info *rc); -#endif static int add_route(struct rib_head *rnh, struct rtentry *rt, struct route_nhop_data *rnd, struct rib_cmd_info *rc); @@ -100,25 +98,15 @@ static bool fill_pxmask_family(int family, int plen, struct sockaddr *_dst, static int get_prio_from_info(const struct rt_addrinfo *info); static int nhop_get_prio(const struct nhop_object *nh); -#ifdef ROUTE_MPATH static bool rib_can_multipath(struct rib_head *rh); -#endif /* Per-vnet multipath routing configuration */ SYSCTL_DECL(_net_route); #define V_rib_route_multipath VNET(rib_route_multipath) -#ifdef ROUTE_MPATH -#define _MP_FLAGS CTLFLAG_RW VNET_DEFINE(u_int, rib_route_multipath) = 1; -#else -#define _MP_FLAGS CTLFLAG_RD -VNET_DEFINE(u_int, rib_route_multipath) = 0; -#endif -SYSCTL_UINT(_net_route, OID_AUTO, 
multipath, _MP_FLAGS | CTLFLAG_VNET, +SYSCTL_UINT(_net_route, OID_AUTO, multipath, CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(rib_route_multipath), 0, "Enable route multipath"); -#undef _MP_FLAGS -#ifdef ROUTE_MPATH VNET_DEFINE(u_int, fib_hash_outbound) = 0; SYSCTL_UINT(_net_route, OID_AUTO, hash_outbound, CTLFLAG_RD | CTLFLAG_VNET, &VNET_NAME(fib_hash_outbound), 0, @@ -132,7 +120,6 @@ uint8_t mpath_entropy_key[MPATH_ENTROPY_KEY_LEN] = { 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa, }; -#endif #if defined(INET) && defined(INET6) FEATURE(ipv4_rfc5549_support, "Route IPv4 packets via IPv6 nexthops"); @@ -167,7 +154,6 @@ rib_can_4o6_nhop(void) } #endif -#ifdef ROUTE_MPATH static bool rib_can_multipath(struct rib_head *rh) { @@ -200,7 +186,6 @@ nhop_can_multipath(const struct nhop_object *nh) return (1); } -#endif static int get_info_weight(const struct rt_addrinfo *info, uint32_t default_weight) @@ -602,7 +587,6 @@ rib_del_route_px(uint32_t fibnum, struct sockaddr *dst, int plen, if (rc->rc_cmd == RTM_DELETE) rt_free(rc->rc_rt); -#ifdef ROUTE_MPATH else { /* * Deleting 1 path may result in RTM_CHANGE to @@ -611,7 +595,6 @@ rib_del_route_px(uint32_t fibnum, struct sockaddr *dst, int plen, */ nhop_free_any(rc->rc_nh_old); } -#endif return (0); } @@ -834,7 +817,6 @@ add_route_flags(struct rib_head *rnh, struct rtentry *rt, struct route_nhop_data RIB_WUNLOCK(rnh); -#ifdef ROUTE_MPATH if ((op_flags & RTM_F_APPEND) && rib_can_multipath(rnh) && nhop_can_multipath(rnd_add->rnd_nhop) && nhop_can_multipath(rnd_orig.rnd_nhop)) { @@ -857,7 +839,6 @@ add_route_flags(struct rib_head *rnh, struct rtentry *rt, struct route_nhop_data } return (error); } -#endif /* Out of options - free state and return error */ error = EEXIST; out: @@ -868,7 +849,6 @@ out: return (error); } -#ifdef ROUTE_MPATH static int add_route_flags_mpath(struct rib_head *rnh, struct rtentry *rt, struct route_nhop_data *rnd_add, struct route_nhop_data *rnd_orig, @@ 
-912,7 +892,6 @@ add_route_flags_mpath(struct rib_head *rnh, struct rtentry *rt, return (0); } -#endif /* * Removes route defined by @info from the kernel table specified by @fibnum and @@ -981,7 +960,6 @@ rib_del_route(uint32_t fibnum, struct rt_addrinfo *info, struct rib_cmd_info *rc if (rc->rc_cmd == RTM_DELETE) rt_free(rc->rc_rt); -#ifdef ROUTE_MPATH else { /* * Deleting 1 path may result in RTM_CHANGE to @@ -990,7 +968,6 @@ rib_del_route(uint32_t fibnum, struct rt_addrinfo *info, struct rib_cmd_info *rc */ nhop_free_any(rc->rc_nh_old); } -#endif return (0); } @@ -1008,7 +985,6 @@ rt_delete_conditional(struct rib_head *rnh, struct rtentry *rt, { struct nhop_object *nh = rt->rt_nhop; -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(nh)) { struct nhgrp_object *nhg = (struct nhgrp_object *)nh; struct route_nhop_data rnd; @@ -1027,7 +1003,6 @@ rt_delete_conditional(struct rib_head *rnh, struct rtentry *rt, } return (error); } -#endif if (cb != NULL && !cb(rt, nh, cbdata)) return (ESRCH); @@ -1129,7 +1104,6 @@ change_nhop(struct rib_head *rnh, struct rt_addrinfo *info, return (error); } -#ifdef ROUTE_MPATH static int change_mpath_route(struct rib_head *rnh, struct rtentry *rt, struct rt_addrinfo *info, struct route_nhop_data *rnd_orig, @@ -1180,7 +1154,6 @@ change_mpath_route(struct rib_head *rnh, struct rtentry *rt, return (error); } -#endif static int change_route_byinfo(struct rib_head *rnh, struct rtentry *rt, @@ -1195,10 +1168,8 @@ change_route_byinfo(struct rib_head *rnh, struct rtentry *rt, if (nh_orig == NULL) return (ESRCH); -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(nh_orig)) return (change_mpath_route(rnh, rt, info, rnd_orig, rc)); -#endif rnd_new.rnd_weight = get_info_weight(info, rnd_orig->rnd_weight); error = change_nhop(rnh, info, nh_orig, &rnd_new.rnd_nhop); @@ -1448,14 +1419,12 @@ rt_checkdelroute(struct radix_node *rn, void *arg) /* Add to the list and return */ rt->rt_chain = di->head; di->head = rt; -#ifdef ROUTE_MPATH } else { /* * RTM_CHANGE to a different nexthop 
or nexthop group. * Free old multipath group. */ nhop_free_any(di->rc.rc_nh_old); -#endif } return (0); @@ -1510,7 +1479,6 @@ rib_walk_del(u_int fibnum, int family, rib_filter_f_t *filter_f, void *filter_ar rib_notify(rnh, RIB_NOTIFY_DELAYED, &di.rc); if (report) { -#ifdef ROUTE_MPATH struct nhgrp_object *nhg; const struct weightened_nhop *wn; uint32_t num_nhops; @@ -1520,8 +1488,7 @@ rib_walk_del(u_int fibnum, int family, rib_filter_f_t *filter_f, void *filter_ar for (int i = 0; i < num_nhops; i++) rt_routemsg(RTM_DELETE, rt, wn[i].nh, fibnum); } else -#endif - rt_routemsg(RTM_DELETE, rt, nh, fibnum); + rt_routemsg(RTM_DELETE, rt, nh, fibnum); } rt_free(rt); } diff --git a/sys/net/route/route_helpers.c b/sys/net/route/route_helpers.c index 2c0df15b04b7..303ff018e9e0 100644 --- a/sys/net/route/route_helpers.c +++ b/sys/net/route/route_helpers.c @@ -28,7 +28,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/jail.h> @@ -257,7 +256,6 @@ rib_lookup(uint32_t fibnum, const struct sockaddr *dst, uint32_t flags, return (nh); } -#ifdef ROUTE_MPATH static void notify_add(struct rib_cmd_info *rc, const struct weightened_nhop *wn_src, route_notification_t *cb, void *cbdata) @@ -410,7 +408,6 @@ rib_decompose_notification(const struct rib_cmd_info *rc, route_notification_t * break; } } -#endif union sockaddr_union { struct sockaddr sa; diff --git a/sys/net/route/route_ifaddrs.c b/sys/net/route/route_ifaddrs.c index cf369846fd59..8da023ff0e9a 100644 --- a/sys/net/route/route_ifaddrs.c +++ b/sys/net/route/route_ifaddrs.c @@ -29,8 +29,6 @@ * SUCH DAMAGE. 
*/ -#include "opt_route.h" - #include <sys/param.h> #include <sys/systm.h> #include <sys/malloc.h> diff --git a/sys/net/route/route_rtentry.c b/sys/net/route/route_rtentry.c index 9440233f0906..b807f3a2d4f2 100644 --- a/sys/net/route/route_rtentry.c +++ b/sys/net/route/route_rtentry.c @@ -28,7 +28,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> @@ -105,21 +104,19 @@ static void destroy_rtentry(struct rtentry *rt) { #ifdef VIMAGE + const struct weightened_nhop *wn; struct nhop_object *nh = rt->rt_nhop; + uint32_t num_nhops; /* * At this moment rnh, nh_control may be already freed. * nhop interface may have been migrated to a different vnet. * Use vnet stored in the nexthop to delete the entry. */ -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(nh)) { - const struct weightened_nhop *wn; - uint32_t num_nhops; wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops); nh = wn[0].nh; } -#endif CURVNET_SET(nhop_get_vnet(nh)); #endif diff --git a/sys/net/route/route_subscription.c b/sys/net/route/route_subscription.c index f3f0aac529c7..daf5f004f7f6 100644 --- a/sys/net/route/route_subscription.c +++ b/sys/net/route/route_subscription.c @@ -26,7 +26,6 @@ */ #include <sys/cdefs.h> -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> diff --git a/sys/net/route/route_var.h b/sys/net/route/route_var.h index 074ebc779719..40433f1b37c0 100644 --- a/sys/net/route/route_var.h +++ b/sys/net/route/route_var.h @@ -277,13 +277,12 @@ struct nhgrp_object { static inline struct nhop_object * nhop_select(struct nhop_object *nh, uint32_t flowid) { + struct nhgrp_object *nhg; -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(nh)) { - struct nhgrp_object *nhg = (struct nhgrp_object *)nh; + nhg = (struct nhgrp_object *)nh; nh = nhg->nhops[flowid % nhg->nhg_size]; } -#endif return (nh); } diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c index 5251d4d75f15..562cf6d426c9 100644 --- a/sys/net/rtsock.c +++ 
b/sys/net/rtsock.c @@ -29,7 +29,6 @@ * SUCH DAMAGE. */ #include "opt_ddb.h" -#include "opt_route.h" #include "opt_inet.h" #include "opt_inet6.h" @@ -287,13 +286,12 @@ report_route_event(const struct rib_cmd_info *rc, void *_cbdata) static void rts_handle_route_event(uint32_t fibnum, const struct rib_cmd_info *rc) { -#ifdef ROUTE_MPATH + if ((rc->rc_nh_new && NH_IS_NHGRP(rc->rc_nh_new)) || (rc->rc_nh_old && NH_IS_NHGRP(rc->rc_nh_old))) { rib_decompose_notification(rc, report_route_event, (void *)(uintptr_t)fibnum); } else -#endif report_route_event(rc, (void *)(uintptr_t)fibnum); } static struct rtbridge rtsbridge = { @@ -750,11 +748,12 @@ fill_addrinfo(struct rt_msghdr *rtm, int len, struct linear_buffer *lb, u_int fi static struct nhop_object * select_nhop(struct nhop_object *nh, const struct sockaddr *gw) { - if (!NH_IS_NHGRP(nh)) - return (nh); -#ifdef ROUTE_MPATH const struct weightened_nhop *wn; uint32_t num_nhops; + + if (!NH_IS_NHGRP(nh)) + return (nh); + wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops); if (gw == NULL) return (wn[0].nh); @@ -762,7 +761,7 @@ select_nhop(struct nhop_object *nh, const struct sockaddr *gw) if (match_nhop_gw(wn[i].nh, gw)) return (wn[i].nh); } -#endif + return (NULL); } @@ -1029,7 +1028,6 @@ update_rtm_from_rc(struct rt_addrinfo *info, struct rt_msghdr **prtm, return (0); } -#ifdef ROUTE_MPATH static void save_del_notification(const struct rib_cmd_info *rc, void *_cbdata) { @@ -1047,7 +1045,6 @@ save_add_notification(const struct rib_cmd_info *rc, void *_cbdata) if (rc->rc_cmd == RTM_ADD) *rc_new = *rc; } -#endif #if defined(INET6) || defined(INET) static struct sockaddr * @@ -1171,7 +1168,6 @@ rts_send(struct socket *so, int flags, struct mbuf *m, error = rib_action(fibnum, rtm->rtm_type, &info, &rc); if (error == 0) { rtsock_notify_event(fibnum, &rc); -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(rc.rc_nh_new) || (rc.rc_nh_old && NH_IS_NHGRP(rc.rc_nh_old))) { struct rib_cmd_info rc_simple = {}; @@ -1179,7 +1175,7 @@ 
rts_send(struct socket *so, int flags, struct mbuf *m, save_add_notification, (void *)&rc_simple); rc = rc_simple; } -#endif + /* nh MAY be empty if RTM_CHANGE request is no-op */ nh = rc.rc_nh_new; if (nh != NULL) { @@ -1193,7 +1189,6 @@ rts_send(struct socket *so, int flags, struct mbuf *m, error = rib_action(fibnum, RTM_DELETE, &info, &rc); if (error == 0) { rtsock_notify_event(fibnum, &rc); -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(rc.rc_nh_old) || (rc.rc_nh_new && NH_IS_NHGRP(rc.rc_nh_new))) { struct rib_cmd_info rc_simple = {}; @@ -1201,7 +1196,6 @@ rts_send(struct socket *so, int flags, struct mbuf *m, save_del_notification, (void *)&rc_simple); rc = rc_simple; } -#endif nh = rc.rc_nh_old; } break; @@ -2249,8 +2243,11 @@ rt_dispatch(struct mbuf *m, sa_family_t saf) static int sysctl_dumpentry(struct rtentry *rt, void *vw) { + const struct weightened_nhop *wn; struct walkarg *w = vw; struct nhop_object *nh; + int error; + uint32_t num_nhops; NET_EPOCH_ASSERT(); @@ -2259,11 +2256,7 @@ sysctl_dumpentry(struct rtentry *rt, void *vw) export_rtaddrs(rt, w->dst, w->mask); nh = rt_get_raw_nhop(rt); -#ifdef ROUTE_MPATH if (NH_IS_NHGRP(nh)) { - const struct weightened_nhop *wn; - uint32_t num_nhops; - int error; wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops); for (int i = 0; i < num_nhops; i++) { error = sysctl_dumpnhop(rt, wn[i].nh, wn[i].weight, w); @@ -2271,7 +2264,6 @@ sysctl_dumpentry(struct rtentry *rt, void *vw) return (error); } } else -#endif sysctl_dumpnhop(rt, nh, rt->rt_weight, w); return (0); @@ -2701,11 +2693,7 @@ sysctl_rtsock(SYSCTL_HANDLER_ARGS) if (w.w_op == NET_RT_NHOP) error = nhops_dump_sysctl(rnh, w.w_req); else -#ifdef ROUTE_MPATH error = nhgrp_dump_sysctl(rnh, w.w_req); -#else - error = ENOTSUP; -#endif break; case NET_RT_IFLIST: case NET_RT_IFLISTL: diff --git a/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c b/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c index def589107725..934a5d4d37f6 100644 --- 
a/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c +++ b/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c @@ -436,13 +436,10 @@ static const STRUCT_USB_HOST_ID ubt_ignore_devs[] = { USB_VPI(USB_VENDOR_INTEL2, 0x0025, 0) }, { USB_VPI(USB_VENDOR_INTEL2, 0x0026, 0) }, { USB_VPI(USB_VENDOR_INTEL2, 0x0029, 0) }, - - /* - * Some Intel controllers are not yet supported by ng_ubt_intel and - * should be ignored. - */ { USB_VPI(USB_VENDOR_INTEL2, 0x0032, 0) }, { USB_VPI(USB_VENDOR_INTEL2, 0x0033, 0) }, + { USB_VPI(USB_VENDOR_INTEL2, 0x0035, 0) }, + { USB_VPI(USB_VENDOR_INTEL2, 0x0036, 0) }, /* MediaTek MT7925 */ { USB_VPI(USB_VENDOR_AZUREWAVE, 0x3602, 0) }, diff --git a/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_intel.c b/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_intel.c index c4410b7b2c80..290c04790a46 100644 --- a/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_intel.c +++ b/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_intel.c @@ -101,6 +101,8 @@ static const STRUCT_USB_HOST_ID ubt_intel_devs[] = /* Intel Wireless 9260/9560 and successors */ { USB_VPI(USB_VENDOR_INTEL2, 0x0032, UBT_INTEL_DEVICE_9260) }, { USB_VPI(USB_VENDOR_INTEL2, 0x0033, UBT_INTEL_DEVICE_9260) }, + { USB_VPI(USB_VENDOR_INTEL2, 0x0035, UBT_INTEL_DEVICE_9260) }, + { USB_VPI(USB_VENDOR_INTEL2, 0x0036, UBT_INTEL_DEVICE_9260) }, }; /* diff --git a/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_rtl.c b/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_rtl.c index f5dcac0a6846..54c3659d0ac9 100644 --- a/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_rtl.c +++ b/sys/netgraph/bluetooth/drivers/ubt/ng_ubt_rtl.c @@ -74,21 +74,19 @@ const STRUCT_USB_HOST_ID ubt_rtl_devs[] = USB_IFACE_SUBCLASS(UDSUBCLASS_RF), USB_IFACE_PROTOCOL(UDPROTO_BLUETOOTH) }, + /* + * Non-Realtek vendors using Realtek Bluetooth chipsets. + * Devices with vendor 0x0bda are already matched by the + * generic rule above. 
+ */ + /* Realtek 8821CE Bluetooth devices */ { USB_VPI(0x13d3, 0x3529, 0) }, - /* Realtek 8822CE Bluetooth devices */ - { USB_VPI(0x0bda, 0xb00c, 0) }, - { USB_VPI(0x0bda, 0xc822, 0) }, - /* Realtek 8851BE Bluetooth devices */ { USB_VPI(0x13d3, 0x3600, 0) }, /* Realtek 8852AE Bluetooth devices */ - { USB_VPI(0x0bda, 0x2852, 0) }, - { USB_VPI(0x0bda, 0xc852, 0) }, - { USB_VPI(0x0bda, 0x385a, 0) }, - { USB_VPI(0x0bda, 0x4852, 0) }, { USB_VPI(0x04c5, 0x165c, 0) }, { USB_VPI(0x04ca, 0x4006, 0) }, { USB_VPI(0x0cb8, 0xc549, 0) }, @@ -105,9 +103,6 @@ const STRUCT_USB_HOST_ID ubt_rtl_devs[] = /* Realtek 8852BE Bluetooth devices */ { USB_VPI(0x0cb8, 0xc559, 0) }, - { USB_VPI(0x0bda, 0x4853, 0) }, - { USB_VPI(0x0bda, 0x887b, 0) }, - { USB_VPI(0x0bda, 0xb85b, 0) }, { USB_VPI(0x13d3, 0x3570, 0) }, { USB_VPI(0x13d3, 0x3571, 0) }, { USB_VPI(0x13d3, 0x3572, 0) }, @@ -115,11 +110,7 @@ const STRUCT_USB_HOST_ID ubt_rtl_devs[] = { USB_VPI(0x0489, 0xe123, 0) }, { USB_VPI(0x0489, 0xe125, 0) }, - /* Realtek 8852BT/8852BE-VT Bluetooth devices */ - { USB_VPI(0x0bda, 0x8520, 0) }, - /* Realtek 8922AE Bluetooth devices */ - { USB_VPI(0x0bda, 0x8922, 0) }, { USB_VPI(0x13d3, 0x3617, 0) }, { USB_VPI(0x13d3, 0x3616, 0) }, { USB_VPI(0x0489, 0xe130, 0) }, @@ -141,7 +132,6 @@ const STRUCT_USB_HOST_ID ubt_rtl_devs[] = { USB_VPI(0x7392, 0xa611, 0) }, /* Realtek 8723DE Bluetooth devices */ - { USB_VPI(0x0bda, 0xb009, 0) }, { USB_VPI(0x2ff8, 0xb011, 0) }, /* Realtek 8761BUV Bluetooth devices */ @@ -149,7 +139,6 @@ const STRUCT_USB_HOST_ID ubt_rtl_devs[] = { USB_VPI(0x2357, 0x0604, 0) }, { USB_VPI(0x0b05, 0x190e, 0) }, { USB_VPI(0x2550, 0x8761, 0) }, - { USB_VPI(0x0bda, 0x8771, 0) }, { USB_VPI(0x6655, 0x8771, 0) }, { USB_VPI(0x7392, 0xc611, 0) }, { USB_VPI(0x2b89, 0x8761, 0) }, @@ -175,7 +164,6 @@ const STRUCT_USB_HOST_ID ubt_rtl_devs[] = { USB_VPI(0x13d3, 0x3555, 0) }, { USB_VPI(0x2ff8, 0x3051, 0) }, { USB_VPI(0x1358, 0xc123, 0) }, - { USB_VPI(0x0bda, 0xc123, 0) }, { USB_VPI(0x0cb5, 0xc547, 0) }, }; 
const size_t ubt_rtl_devs_sizeof = sizeof(ubt_rtl_devs); diff --git a/sys/netgraph/netflow/netflow.c b/sys/netgraph/netflow/netflow.c index 05c6062463be..3b3878dbcde8 100644 --- a/sys/netgraph/netflow/netflow.c +++ b/sys/netgraph/netflow/netflow.c @@ -33,7 +33,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/bitstring.h> #include <sys/systm.h> diff --git a/sys/netgraph/netflow/netflow_v9.c b/sys/netgraph/netflow/netflow_v9.c index e6b63a8aa36b..2761bd3a0074 100644 --- a/sys/netgraph/netflow/netflow_v9.c +++ b/sys/netgraph/netflow/netflow_v9.c @@ -28,7 +28,6 @@ #include <sys/cdefs.h> #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/counter.h> diff --git a/sys/netgraph/netflow/ng_netflow.c b/sys/netgraph/netflow/ng_netflow.c index 9bf212871fcb..22f58b0876a2 100644 --- a/sys/netgraph/netflow/ng_netflow.c +++ b/sys/netgraph/netflow/ng_netflow.c @@ -33,7 +33,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> diff --git a/sys/netinet/in_fib.c b/sys/netinet/in_fib.c index 0bcda9fdd312..d8e373bf3dc2 100644 --- a/sys/netinet/in_fib.c +++ b/sys/netinet/in_fib.c @@ -66,7 +66,6 @@ CHK_STRUCT_ROUTE_COMPAT(struct route_in, ro_dst4); VNET_DEFINE(struct fib_dp *, inet_dp); #endif -#ifdef ROUTE_MPATH struct _hash_5tuple_ipv4 { struct in_addr src; struct in_addr dst; @@ -97,7 +96,6 @@ fib4_calc_software_hash(struct in_addr src, struct in_addr dst, return (toeplitz_hash(MPATH_ENTROPY_KEY_LEN, mpath_entropy_key, sizeof(data), (uint8_t *)&data)); } -#endif /* * Looks up path in fib @fibnum specified by @dst. 
@@ -192,19 +190,19 @@ static int check_urpf(struct nhop_object *nh, uint32_t flags, const struct ifnet *src_if) { -#ifdef ROUTE_MPATH + const struct weightened_nhop *wn; + uint32_t num_nhops; + if (NH_IS_NHGRP(nh)) { - const struct weightened_nhop *wn; - uint32_t num_nhops; wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops); for (int i = 0; i < num_nhops; i++) { if (check_urpf_nhop(wn[i].nh, flags, src_if) != 0) return (1); } return (0); - } else -#endif - return (check_urpf_nhop(nh, flags, src_if)); + } + + return (check_urpf_nhop(nh, flags, src_if)); } #ifndef FIB_ALGO diff --git a/sys/netinet/in_pcb.c b/sys/netinet/in_pcb.c index e375f0edcc7e..f72260834a96 100644 --- a/sys/netinet/in_pcb.c +++ b/sys/netinet/in_pcb.c @@ -41,7 +41,6 @@ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ratelimit.h" -#include "opt_route.h" #include "opt_rss.h" #include <sys/param.h> @@ -1167,8 +1166,8 @@ in_pcbconnect(struct inpcb *inp, struct sockaddr_in *sin, struct ucred *cred) MPASS(error == 0); } else in_pcbrehash(inp); -#ifdef ROUTE_MPATH - if (CALC_FLOWID_OUTBOUND) { + + if (V_fib_hash_outbound) { uint32_t hash_val, hash_type; hash_val = fib4_calc_software_hash(inp->inp_laddr, @@ -1178,7 +1177,6 @@ in_pcbconnect(struct inpcb *inp, struct sockaddr_in *sin, struct ucred *cred) inp->inp_flowid = hash_val; inp->inp_flowtype = hash_type; } -#endif if (anonport) inp->inp_flags |= INP_ANONPORT; return (0); diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c index 7de3dc24dc53..5800a0854ee5 100644 --- a/sys/netinet/ip_input.c +++ b/sys/netinet/ip_input.c @@ -33,7 +33,6 @@ #include "opt_inet.h" #include "opt_ipstealth.h" #include "opt_ipsec.h" -#include "opt_route.h" #include "opt_rss.h" #include "opt_sctp.h" diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c index 8af44c6a200d..9431d5f0d7b2 100644 --- a/sys/netinet/ip_output.c +++ b/sys/netinet/ip_output.c @@ -34,7 +34,6 @@ #include "opt_kern_tls.h" #include "opt_mbuf_stress_test.h" #include 
"opt_ratelimit.h" -#include "opt_route.h" #include "opt_rss.h" #include "opt_sctp.h" diff --git a/sys/netinet/raw_ip.c b/sys/netinet/raw_ip.c index e0e7aed04cd0..9b17d0d80327 100644 --- a/sys/netinet/raw_ip.c +++ b/sys/netinet/raw_ip.c @@ -33,7 +33,6 @@ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/jail.h> @@ -486,8 +485,7 @@ rip_send(struct socket *so, int pruflags, struct mbuf *m, struct sockaddr *nam, ip->ip_len = htons(m->m_pkthdr.len); ip->ip_src = inp->inp_laddr; ip->ip_dst.s_addr = *dst; -#ifdef ROUTE_MPATH - if (CALC_FLOWID_OUTBOUND) { + if (V_fib_hash_outbound) { uint32_t hash_type, hash_val; hash_val = fib4_calc_software_hash(ip->ip_src, @@ -496,7 +494,6 @@ rip_send(struct socket *so, int pruflags, struct mbuf *m, struct sockaddr *nam, M_HASHTYPE_SET(m, hash_type); flags |= IP_NODEFAULTFLOWID; } -#endif if (jailed(inp->inp_cred)) { /* * prison_local_ip4() would be good enough but would @@ -539,8 +536,7 @@ rip_send(struct socket *so, int pruflags, struct mbuf *m, struct sockaddr *nam, return (EINVAL); ip = mtod(m, struct ip *); } -#ifdef ROUTE_MPATH - if (CALC_FLOWID_OUTBOUND) { + if (V_fib_hash_outbound) { uint32_t hash_type, hash_val; hash_val = fib4_calc_software_hash(ip->ip_dst, @@ -549,7 +545,6 @@ rip_send(struct socket *so, int pruflags, struct mbuf *m, struct sockaddr *nam, M_HASHTYPE_SET(m, hash_type); flags |= IP_NODEFAULTFLOWID; } -#endif INP_RLOCK(inp); /* * Don't allow both user specified and setsockopt options, diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c index 8aa8a7b7fefc..b6bb0221e872 100644 --- a/sys/netinet/tcp_subr.c +++ b/sys/netinet/tcp_subr.c @@ -2216,6 +2216,8 @@ tcp_send_challenge_ack(struct tcpcb *tp, struct tcphdr *th, struct mbuf *m) tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, tp->snd_nxt, TH_ACK); tp->last_ack_sent = tp->rcv_nxt; + } else { + m_freem(m); } } diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c 
index aadcf0c9ce9c..b0a75127b124 100644 --- a/sys/netinet/tcp_usrreq.c +++ b/sys/netinet/tcp_usrreq.c @@ -396,10 +396,11 @@ tcp_usr_listen(struct socket *so, int backlog, struct thread *td) if (already_listening) goto out; - if (error == 0) + if (error == 0) { in_pcblisten(inp); - if (tp->t_flags & TF_FASTOPEN) - tp->t_tfo_pending = tcp_fastopen_alloc_counter(); + if (tp->t_flags & TF_FASTOPEN) + tp->t_tfo_pending = tcp_fastopen_alloc_counter(); + } out: tcp_bblog_pru(tp, PRU_LISTEN, error); @@ -460,12 +461,11 @@ tcp6_usr_listen(struct socket *so, int backlog, struct thread *td) if (already_listening) goto out; - if (error == 0) + if (error == 0) { in_pcblisten(inp); - if (tp->t_flags & TF_FASTOPEN) - tp->t_tfo_pending = tcp_fastopen_alloc_counter(); - - if (error != 0) + if (tp->t_flags & TF_FASTOPEN) + tp->t_tfo_pending = tcp_fastopen_alloc_counter(); + } else inp->inp_vflag = vflagsav; out: diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h index 8dff330cb46b..a3a42b68c26b 100644 --- a/sys/netinet/tcp_var.h +++ b/sys/netinet/tcp_var.h @@ -85,6 +85,15 @@ #define TCP_EI_BITS_RST_IN_FR 0x200 /* a front state reset */ #define TCP_EI_BITS_2MS_TIMER 0x400 /* 2 MSL timer expired */ +#define TCP_TRK_TRACK_FLG_EMPTY 0x00 /* Available */ +#define TCP_TRK_TRACK_FLG_USED 0x01 /* In use */ +#define TCP_TRK_TRACK_FLG_OPEN 0x02 /* End is not valid (open range request) */ +#define TCP_TRK_TRACK_FLG_SEQV 0x04 /* We had a sendfile that touched it */ +#define TCP_TRK_TRACK_FLG_COMP 0x08 /* Sendfile as placed the last bits (range req only) */ +#define TCP_TRK_TRACK_FLG_FSND 0x10 /* First send has been done into the seq space */ +#define TCP_TRK_TRACK_FLG_LSND 0x20 /* We were able to set the Last Sent */ +#define MAX_TCP_TRK_REQ 5 /* Max we will have at once */ + #if defined(_KERNEL) #include <sys/_callout.h> #include <sys/osd.h> @@ -136,15 +145,6 @@ struct sackhint { STAILQ_HEAD(tcp_log_stailq, tcp_log_mem); -#define TCP_TRK_TRACK_FLG_EMPTY 0x00 /* Available */ -#define 
TCP_TRK_TRACK_FLG_USED 0x01 /* In use */ -#define TCP_TRK_TRACK_FLG_OPEN 0x02 /* End is not valid (open range request) */ -#define TCP_TRK_TRACK_FLG_SEQV 0x04 /* We had a sendfile that touched it */ -#define TCP_TRK_TRACK_FLG_COMP 0x08 /* Sendfile as placed the last bits (range req only) */ -#define TCP_TRK_TRACK_FLG_FSND 0x10 /* First send has been done into the seq space */ -#define TCP_TRK_TRACK_FLG_LSND 0x20 /* We were able to set the Last Sent */ -#define MAX_TCP_TRK_REQ 5 /* Max we will have at once */ - struct tcp_sendfile_track { uint64_t timestamp; /* User sent timestamp */ uint64_t start; /* Start of sendfile offset */ diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c index 75782765bc98..e8847ea12f1e 100644 --- a/sys/netinet/udp_usrreq.c +++ b/sys/netinet/udp_usrreq.c @@ -39,7 +39,6 @@ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" -#include "opt_route.h" #include "opt_rss.h" #include <sys/param.h> @@ -1127,7 +1126,7 @@ udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, u_char tos, vflagsav; uint8_t pr; uint16_t cscov = 0; - uint32_t flowid = 0; + uint32_t hash_val, hash_type, flowid = 0; uint8_t flowtype = M_HASHTYPE_NONE; bool use_cached_route; @@ -1487,11 +1486,7 @@ udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, if (flowtype != M_HASHTYPE_NONE) { m->m_pkthdr.flowid = flowid; M_HASHTYPE_SET(m, flowtype); - } -#if defined(ROUTE_MPATH) || defined(RSS) - else if (CALC_FLOWID_OUTBOUND_SENDTO) { - uint32_t hash_val, hash_type; - + } else if (CALC_FLOWID_OUTBOUND_SENDTO) { hash_val = fib4_calc_packet_hash(laddr, faddr, lport, fport, pr, &hash_type); m->m_pkthdr.flowid = hash_val; @@ -1510,7 +1505,6 @@ udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, * hash value based on the packet contents. 
*/ ipflags |= IP_NODEFAULTFLOWID; -#endif /* RSS */ if (pr == IPPROTO_UDPLITE) UDPLITE_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u); diff --git a/sys/netinet6/in6_fib.c b/sys/netinet6/in6_fib.c index 95094a1ace76..c851a5bd14c5 100644 --- a/sys/netinet6/in6_fib.c +++ b/sys/netinet6/in6_fib.c @@ -72,7 +72,6 @@ CHK_STRUCT_ROUTE_COMPAT(struct route_in6, ro_dst); VNET_DEFINE(struct fib_dp *, inet6_dp); #endif -#ifdef ROUTE_MPATH struct _hash_5tuple_ipv6 { struct in6_addr src; struct in6_addr dst; @@ -103,7 +102,6 @@ fib6_calc_software_hash(const struct in6_addr *src, const struct in6_addr *dst, return (toeplitz_hash(MPATH_ENTROPY_KEY_LEN, mpath_entropy_key, sizeof(data), (uint8_t *)&data)); } -#endif /* * Looks up path in fib @fibnum specified by @dst. @@ -201,19 +199,19 @@ static int check_urpf(struct nhop_object *nh, uint32_t flags, const struct ifnet *src_if) { -#ifdef ROUTE_MPATH + const struct weightened_nhop *wn; + uint32_t num_nhops; + if (NH_IS_NHGRP(nh)) { - const struct weightened_nhop *wn; - uint32_t num_nhops; wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops); for (int i = 0; i < num_nhops; i++) { if (check_urpf_nhop(wn[i].nh, flags, src_if) != 0) return (1); } return (0); - } else -#endif - return (check_urpf_nhop(nh, flags, src_if)); + } + + return (check_urpf_nhop(nh, flags, src_if)); } #ifndef FIB_ALGO diff --git a/sys/netinet6/in6_mcast.c b/sys/netinet6/in6_mcast.c index 4e770079e64e..7942faefd568 100644 --- a/sys/netinet6/in6_mcast.c +++ b/sys/netinet6/in6_mcast.c @@ -2806,9 +2806,9 @@ sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) ifindex = name[0]; NET_EPOCH_ENTER(et); - ifp = ifnet_byindex(ifindex); + ifp = ifnet_byindex_ref(ifindex); + NET_EPOCH_EXIT(et); if (ifp == NULL) { - NET_EPOCH_EXIT(et); CTR2(KTR_MLD, "%s: no ifp for ifindex %u", __func__, ifindex); return (ENOENT); @@ -2821,7 +2821,7 @@ sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) retval = sysctl_wire_old_buffer(req, sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct 
in6_addr))); if (retval) { - NET_EPOCH_EXIT(et); + if_rele(ifp); return (retval); } @@ -2856,7 +2856,7 @@ sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) } IN6_MULTI_LIST_UNLOCK(); IN6_MULTI_UNLOCK(); - NET_EPOCH_EXIT(et); + if_rele(ifp); return (retval); } diff --git a/sys/netinet6/in6_pcb.c b/sys/netinet6/in6_pcb.c index 6bea94160eb2..216051156767 100644 --- a/sys/netinet6/in6_pcb.c +++ b/sys/netinet6/in6_pcb.c @@ -67,7 +67,6 @@ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" -#include "opt_route.h" #include "opt_rss.h" #include <sys/hash.h> @@ -449,8 +448,7 @@ in6_pcbconnect(struct inpcb *inp, struct sockaddr_in6 *sin6, struct ucred *cred, bzero(&laddr6, sizeof(laddr6)); laddr6.sin6_family = AF_INET6; -#ifdef ROUTE_MPATH - if (CALC_FLOWID_OUTBOUND) { + if (V_fib_hash_outbound) { uint32_t hash_type, hash_val; hash_val = fib6_calc_software_hash(&inp->in6p_laddr, @@ -459,7 +457,6 @@ in6_pcbconnect(struct inpcb *inp, struct sockaddr_in6 *sin6, struct ucred *cred, inp->inp_flowid = hash_val; inp->inp_flowtype = hash_type; } -#endif /* * Call inner routine, to assign local interface address. * in6_pcbladdr() may automatically fill in sin6_scope_id. 
diff --git a/sys/netinet6/in6_proto.c b/sys/netinet6/in6_proto.c index 560698a5e6ad..784b824478fc 100644 --- a/sys/netinet6/in6_proto.c +++ b/sys/netinet6/in6_proto.c @@ -65,7 +65,6 @@ #include "opt_ipsec.h" #include "opt_ipstealth.h" #include "opt_sctp.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/socket.h> diff --git a/sys/netinet6/ip6_input.c b/sys/netinet6/ip6_input.c index 20b092cf6fd2..0dae879c1bd5 100644 --- a/sys/netinet6/ip6_input.c +++ b/sys/netinet6/ip6_input.c @@ -63,7 +63,6 @@ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" -#include "opt_route.h" #include "opt_rss.h" #include "opt_sctp.h" diff --git a/sys/netinet6/ip6_output.c b/sys/netinet6/ip6_output.c index b7ba3c4c080c..29374a39e336 100644 --- a/sys/netinet6/ip6_output.c +++ b/sys/netinet6/ip6_output.c @@ -65,7 +65,6 @@ #include "opt_ipsec.h" #include "opt_kern_tls.h" #include "opt_ratelimit.h" -#include "opt_route.h" #include "opt_rss.h" #include "opt_sctp.h" diff --git a/sys/netinet6/nd6.c b/sys/netinet6/nd6.c index 88f159a6af43..48e80bb75e0b 100644 --- a/sys/netinet6/nd6.c +++ b/sys/netinet6/nd6.c @@ -33,7 +33,6 @@ #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/systm.h> @@ -1637,13 +1636,10 @@ check_release_defrouter(const struct rib_cmd_info *rc, void *_cbdata) void nd6_subscription_cb(struct rib_head *rnh, struct rib_cmd_info *rc, void *arg) { -#ifdef ROUTE_MPATH + rib_decompose_notification(rc, check_release_defrouter, NULL); if (rc->rc_cmd == RTM_DELETE && !NH_IS_NHGRP(rc->rc_nh_old)) check_release_defrouter(rc, NULL); -#else - check_release_defrouter(rc, NULL); -#endif } int diff --git a/sys/netinet6/raw_ip6.c b/sys/netinet6/raw_ip6.c index f3153b5435e2..9e51005d7c2e 100644 --- a/sys/netinet6/raw_ip6.c +++ b/sys/netinet6/raw_ip6.c @@ -61,7 +61,6 @@ #include "opt_ipsec.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/param.h> #include <sys/errno.h> @@ -361,6 +360,7 @@ 
rip6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, int use_defzone = 0; int hlim = 0; struct in6_addr in6a; + uint32_t hash_type, hash_val; inp = sotoinpcb(so); KASSERT(inp != NULL, ("rip6_send: inp == NULL")); @@ -452,16 +452,12 @@ rip6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, } ip6 = mtod(m, struct ip6_hdr *); -#ifdef ROUTE_MPATH - if (CALC_FLOWID_OUTBOUND) { - uint32_t hash_type, hash_val; - + if (V_fib_hash_outbound) { hash_val = fib6_calc_software_hash(&inp->in6p_laddr, &dstsock->sin6_addr, 0, 0, inp->inp_ip_p, &hash_type); inp->inp_flowid = hash_val; inp->inp_flowtype = hash_type; } -#endif /* * Source address selection. */ diff --git a/sys/netinet6/udp6_usrreq.c b/sys/netinet6/udp6_usrreq.c index 1d1dcb75a1df..729be392668a 100644 --- a/sys/netinet6/udp6_usrreq.c +++ b/sys/netinet6/udp6_usrreq.c @@ -70,7 +70,6 @@ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" -#include "opt_route.h" #include "opt_rss.h" #include <sys/param.h> @@ -940,7 +939,6 @@ udp6_send(struct socket *so, int flags_arg, struct mbuf *m, } flags = 0; -#if defined(ROUTE_MPATH) || defined(RSS) if (CALC_FLOWID_OUTBOUND_SENDTO) { uint32_t hash_type, hash_val; uint8_t pr; @@ -954,7 +952,6 @@ udp6_send(struct socket *so, int flags_arg, struct mbuf *m, } /* do not use inp flowid */ flags |= IP_NODEFAULTFLOWID; -#endif UDPSTAT_INC(udps_opackets); if (nxt == IPPROTO_UDPLITE) diff --git a/sys/netlink/route/nexthop.c b/sys/netlink/route/nexthop.c index 30aa3dd72534..314fb66431b9 100644 --- a/sys/netlink/route/nexthop.c +++ b/sys/netlink/route/nexthop.c @@ -28,7 +28,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/types.h> #include <sys/ck.h> #include <sys/epoch.h> @@ -268,12 +267,10 @@ nl_find_base_unhop(struct unhop_ctl *ctl, uint32_t uidx) static struct nhop_object * clone_unhop(const struct user_nhop *unhop, uint32_t fibnum, int family, int nh_flags) { -#ifdef 
ROUTE_MPATH const struct weightened_nhop *wn; struct weightened_nhop *wn_new, wn_base[MAX_STACK_NHOPS]; - uint32_t num_nhops; -#endif struct nhop_object *nh = NULL; + uint32_t num_nhops; int error; if (unhop->un_nhop_src != NULL) { @@ -298,10 +295,9 @@ clone_unhop(const struct user_nhop *unhop, uint32_t fibnum, int family, int nh_f nhop_set_pxtype_flag(nh, nh_flags); return (nhop_get_nhop(nh, &error)); } -#ifdef ROUTE_MPATH + wn = unhop->un_nhgrp_src; num_nhops = unhop->un_nhgrp_count; - if (num_nhops > MAX_STACK_NHOPS) { wn_new = malloc(num_nhops * sizeof(struct weightened_nhop), M_TEMP, M_NOWAIT); if (wn_new == NULL) @@ -328,7 +324,7 @@ clone_unhop(const struct user_nhop *unhop, uint32_t fibnum, int family, int nh_f if (wn_new != wn_base) free(wn_new, M_TEMP); -#endif + return (nh); } diff --git a/sys/netlink/route/rt.c b/sys/netlink/route/rt.c index 4d7f676d2aec..39ae65217620 100644 --- a/sys/netlink/route/rt.c +++ b/sys/netlink/route/rt.c @@ -29,7 +29,6 @@ #include <sys/cdefs.h> #include "opt_inet.h" #include "opt_inet6.h" -#include "opt_route.h" #include <sys/types.h> #include <sys/malloc.h> #include <sys/rmlock.h> @@ -67,20 +66,21 @@ get_rtm_type(const struct nhop_object *nh) static uint8_t nl_get_rtm_protocol(const struct nhop_object *nh) { -#ifdef ROUTE_MPATH + const struct nhgrp_object *nhg = (const struct nhgrp_object *)nh; + int rt_flags; + uint8_t origin; + if (NH_IS_NHGRP(nh)) { - const struct nhgrp_object *nhg = (const struct nhgrp_object *)nh; - uint8_t origin = nhgrp_get_origin(nhg); + origin = nhgrp_get_origin(nhg); if (origin != RTPROT_UNSPEC) return (origin); nh = nhg->nhops[0]; } -#endif - uint8_t origin = nhop_get_origin(nh); + origin = nhop_get_origin(nh); if (origin != RTPROT_UNSPEC) return (origin); /* TODO: remove guesswork once all kernel users fill in origin */ - int rt_flags = nhop_get_rtflags(nh); + rt_flags = nhop_get_rtflags(nh); if (rt_flags & RTF_PROTO1) return (RTPROT_ZEBRA); if (rt_flags & RTF_STATIC) @@ -174,7 +174,6 @@ 
dump_rc_nhop_mtu(struct nl_writer *nw, const struct nhop_object *nh) *((uint32_t *)(nla + 1)) = nh->nh_mtu; } -#ifdef ROUTE_MPATH static void dump_rc_nhg(struct nl_writer *nw, const struct nhgrp_object *nhg, struct rtmsg *rtm) { @@ -218,20 +217,19 @@ dump_rc_nhg(struct nl_writer *nw, const struct nhgrp_object *nhg, struct rtmsg * } nlattr_set_len(nw, off); } -#endif static void dump_rc_nhop(struct nl_writer *nw, const struct route_nhop_data *rnd, struct rtmsg *rtm) { -#ifdef ROUTE_MPATH + const struct nhop_object *nh = rnd->rnd_nhop; + uint32_t rtflags, uidx, nh_expire; + if (NH_IS_NHGRP(rnd->rnd_nhop)) { dump_rc_nhg(nw, rnd->rnd_nhgrp, rtm); return; } -#endif - const struct nhop_object *nh = rnd->rnd_nhop; - uint32_t rtflags = nhop_get_rtflags(nh); + rtflags = nhop_get_rtflags(nh); /* * IPv4 over IPv6 * ('RTA_VIA', {'family': 10, 'addr': 'fe80::20c:29ff:fe67:2dd'}), ('RTA_OIF', 2), @@ -243,7 +241,7 @@ dump_rc_nhop(struct nl_writer *nw, const struct route_nhop_data *rnd, struct rtm if (nh->nh_flags & NHF_GATEWAY) dump_rc_nhop_gw(nw, nh); - uint32_t uidx = nhop_get_uidx(nh); + uidx = nhop_get_uidx(nh); if (uidx != 0) nlattr_add_u32(nw, NL_RTA_NH_ID, uidx); nlattr_add_u32(nw, NL_RTA_KNH_ID, nhop_get_idx(nh)); @@ -251,7 +249,7 @@ dump_rc_nhop(struct nl_writer *nw, const struct route_nhop_data *rnd, struct rtm if (rtflags & RTF_FIXEDMTU) dump_rc_nhop_mtu(nw, nh); - uint32_t nh_expire = nhop_get_expire(nh); + nh_expire = nhop_get_expire(nh); if (nh_expire > 0) nlattr_add_u32(nw, NL_RTA_EXPIRES, nh_expire - time_uptime); @@ -818,7 +816,6 @@ get_op_flags(int nlm_flags) return (op_flags); } -#ifdef ROUTE_MPATH static int create_nexthop_one(struct nl_parsed_route *attrs, struct rta_mpath_nh *mpnh, struct nl_pstate *npt, struct nhop_object **pnh) @@ -848,7 +845,6 @@ create_nexthop_one(struct nl_parsed_route *attrs, struct rta_mpath_nh *mpnh, return (error); } -#endif static struct nhop_object * create_nexthop_from_attrs(struct nl_parsed_route *attrs, @@ -859,7 +855,6 @@ 
create_nexthop_from_attrs(struct nl_parsed_route *attrs, uint32_t nh_expire = 0; if (attrs->rta_multipath != NULL) { -#ifdef ROUTE_MPATH /* Multipath w/o explicit nexthops */ int num_nhops = attrs->rta_multipath->num_nhops; struct weightened_nhop *wn = npt_alloc(npt, sizeof(*wn) * num_nhops); @@ -892,9 +887,6 @@ create_nexthop_from_attrs(struct nl_parsed_route *attrs, return ((struct nhop_object *)nhg); error = *perror; } -#else - error = ENOTSUP; -#endif *perror = error; } else { nh = nhop_alloc(attrs->rta_table, attrs->rtm_family); diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c index 12a2189207f1..a2e12b3065d0 100644 --- a/sys/netpfil/pf/pf_ioctl.c +++ b/sys/netpfil/pf/pf_ioctl.c @@ -1359,10 +1359,18 @@ pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) PF_MD5_UPD(pfr, addr.v.tblname); break; case PF_ADDR_ADDRMASK: + case PF_ADDR_RANGE: /* XXX ignore af? */ PF_MD5_UPD(pfr, addr.v.a.addr.addr32); PF_MD5_UPD(pfr, addr.v.a.mask.addr32); break; + case PF_ADDR_NONE: + case PF_ADDR_NOROUTE: + case PF_ADDR_URPFFAILED: + /* These do not use any address data. 
*/ + break; + default: + panic("Unknown address type %d", pfr->addr.type); } PF_MD5_UPD(pfr, port[0]); @@ -1372,6 +1380,30 @@ pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) } static void +pf_hash_pool(MD5_CTX *ctx, struct pf_kpool *pool) +{ + uint16_t x; + int y; + + if (pool->cur) { + PF_MD5_UPD(pool, cur->addr); + PF_MD5_UPD_STR(pool, cur->ifname); + PF_MD5_UPD(pool, cur->af); + } + PF_MD5_UPD(pool, key); + PF_MD5_UPD(pool, counter); + + PF_MD5_UPD(pool, mape.offset); + PF_MD5_UPD(pool, mape.psidlen); + PF_MD5_UPD_HTONS(pool, mape.psid, x); + PF_MD5_UPD_HTONL(pool, tblidx, y); + PF_MD5_UPD_HTONS(pool, proxy_port[0], x); + PF_MD5_UPD_HTONS(pool, proxy_port[1], x); + PF_MD5_UPD(pool, opts); + PF_MD5_UPD(pool, ipv6_nexthop_af); +} + +static void pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule) { u_int16_t x; @@ -1381,39 +1413,96 @@ pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule) pf_hash_rule_addr(ctx, &rule->dst); for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) PF_MD5_UPD_STR(rule, label[i]); + PF_MD5_UPD_HTONL(rule, ridentifier, y); PF_MD5_UPD_STR(rule, ifname); PF_MD5_UPD_STR(rule, rcv_ifname); + PF_MD5_UPD_STR(rule, qname); + PF_MD5_UPD_STR(rule, pqname); + PF_MD5_UPD_STR(rule, tagname); PF_MD5_UPD_STR(rule, match_tagname); - PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? 
*/ + + PF_MD5_UPD_STR(rule, overload_tblname); + + pf_hash_pool(ctx, &rule->nat); + pf_hash_pool(ctx, &rule->rdr); + pf_hash_pool(ctx, &rule->route); + PF_MD5_UPD_HTONL(rule, pktrate.limit, y); + PF_MD5_UPD_HTONL(rule, pktrate.seconds, y); + PF_MD5_UPD_HTONL(rule, os_fingerprint, y); + + PF_MD5_UPD_HTONL(rule, rtableid, y); + for (int i = 0; i < PFTM_MAX; i++) + PF_MD5_UPD_HTONL(rule, timeout[i], y); + PF_MD5_UPD_HTONL(rule, max_states, y); + PF_MD5_UPD_HTONL(rule, max_src_nodes, y); + PF_MD5_UPD_HTONL(rule, max_src_states, y); + PF_MD5_UPD_HTONL(rule, max_src_conn, y); + PF_MD5_UPD_HTONL(rule, max_src_conn_rate.limit, y); + PF_MD5_UPD_HTONL(rule, max_src_conn_rate.seconds, y); + PF_MD5_UPD_HTONS(rule, max_pkt_size, y); + PF_MD5_UPD_HTONS(rule, qid, x); + PF_MD5_UPD_HTONS(rule, pqid, x); + PF_MD5_UPD_HTONS(rule, dnpipe, x); + PF_MD5_UPD_HTONS(rule, dnrpipe, x); + PF_MD5_UPD_HTONL(rule, free_flags, y); PF_MD5_UPD_HTONL(rule, prob, y); + + PF_MD5_UPD_HTONS(rule, return_icmp, x); + PF_MD5_UPD_HTONS(rule, return_icmp6, x); + PF_MD5_UPD_HTONS(rule, max_mss, x); + PF_MD5_UPD_HTONS(rule, tag, x); /* dup? */ + PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? 
*/ + PF_MD5_UPD_HTONS(rule, scrub_flags, x); + + PF_MD5_UPD(rule, uid.op); PF_MD5_UPD_HTONL(rule, uid.uid[0], y); PF_MD5_UPD_HTONL(rule, uid.uid[1], y); - PF_MD5_UPD(rule, uid.op); + PF_MD5_UPD(rule, gid.op); PF_MD5_UPD_HTONL(rule, gid.gid[0], y); PF_MD5_UPD_HTONL(rule, gid.gid[1], y); - PF_MD5_UPD(rule, gid.op); + PF_MD5_UPD_HTONL(rule, rule_flag, y); + PF_MD5_UPD_HTONL(rule, rule_ref, y); PF_MD5_UPD(rule, action); PF_MD5_UPD(rule, direction); - PF_MD5_UPD(rule, af); + PF_MD5_UPD(rule, log); + PF_MD5_UPD(rule, logif); PF_MD5_UPD(rule, quick); PF_MD5_UPD(rule, ifnot); - PF_MD5_UPD(rule, rcvifnot); PF_MD5_UPD(rule, match_tag_not); PF_MD5_UPD(rule, natpass); + PF_MD5_UPD(rule, keep_state); + PF_MD5_UPD(rule, af); PF_MD5_UPD(rule, proto); - PF_MD5_UPD(rule, type); - PF_MD5_UPD(rule, code); + PF_MD5_UPD_HTONS(rule, type, x); + PF_MD5_UPD_HTONS(rule, code, x); PF_MD5_UPD(rule, flags); PF_MD5_UPD(rule, flagset); + PF_MD5_UPD(rule, min_ttl); PF_MD5_UPD(rule, allow_opts); PF_MD5_UPD(rule, rt); + PF_MD5_UPD(rule, return_ttl); PF_MD5_UPD(rule, tos); - PF_MD5_UPD(rule, scrub_flags); - PF_MD5_UPD(rule, min_ttl); PF_MD5_UPD(rule, set_tos); + PF_MD5_UPD(rule, anchor_relative); + PF_MD5_UPD(rule, anchor_wildcard); + + PF_MD5_UPD(rule, flush); + PF_MD5_UPD(rule, prio); + PF_MD5_UPD(rule, set_prio[0]); + PF_MD5_UPD(rule, set_prio[1]); + PF_MD5_UPD(rule, naf); + PF_MD5_UPD(rule, rcvifnot); + PF_MD5_UPD(rule, statelim.id); + PF_MD5_UPD_HTONL(rule, statelim.limiter_action, y); + PF_MD5_UPD(rule, sourcelim.id); + PF_MD5_UPD_HTONL(rule, sourcelim.limiter_action, y); + + PF_MD5_UPD(rule, divert.addr); + PF_MD5_UPD_HTONS(rule, divert.port, x); + if (rule->anchor != NULL) PF_MD5_UPD_STR(rule, anchor->path); } diff --git a/sys/ofed/include/rdma/ib_mad.h b/sys/ofed/include/rdma/ib_mad.h index 454db78dbde5..45f5ee2ff2ca 100644 --- a/sys/ofed/include/rdma/ib_mad.h +++ b/sys/ofed/include/rdma/ib_mad.h @@ -558,7 +558,7 @@ struct ib_mad_send_wc { * ib_mad_recv_buf - received MAD buffer 
information. * @list: Reference to next data buffer for a received RMPP MAD. * @grh: References a data buffer containing the global route header. - * The data refereced by this buffer is only valid if the GRH is + * The data referenced by this buffer is only valid if the GRH is * valid. * @mad: References the start of the received MAD. */ diff --git a/sys/powerpc/conf/GENERIC64 b/sys/powerpc/conf/GENERIC64 index 4f61b34a9f18..8daf5353263a 100644 --- a/sys/powerpc/conf/GENERIC64 +++ b/sys/powerpc/conf/GENERIC64 @@ -41,7 +41,6 @@ options VIMAGE # Subsystem virtualization, e.g. VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 -options ROUTE_MPATH # Multipath routing support options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging options TCP_HHOOK # hhook(9) framework for TCP diff --git a/sys/riscv/conf/GENERIC b/sys/riscv/conf/GENERIC index 0761784c095f..827d5efef50b 100644 --- a/sys/riscv/conf/GENERIC +++ b/sys/riscv/conf/GENERIC @@ -30,7 +30,6 @@ options INET # InterNETworking options INET6 # IPv6 communications protocols options TCP_HHOOK # hhook(9) framework for TCP options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 -options ROUTE_MPATH # Multipath routing support options FIB_ALGO # Modular fib lookups options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging diff --git a/sys/rpc/clnt_bck.c b/sys/rpc/clnt_bck.c index 7e4781ec7e9d..c5cbbf045bdc 100644 --- a/sys/rpc/clnt_bck.c +++ b/sys/rpc/clnt_bck.c @@ -314,10 +314,8 @@ call_again: */ sx_xlock(&xprt->xp_lock); error = sosend(xprt->xp_socket, NULL, NULL, mreq, NULL, 0, curthread); -if (error != 0) printf("sosend=%d\n", error); mreq = NULL; if (error == EMSGSIZE) { -printf("emsgsize\n"); SOCK_SENDBUF_LOCK(xprt->xp_socket); sbwait(xprt->xp_socket, SO_SND); SOCK_SENDBUF_UNLOCK(xprt->xp_socket); diff --git a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c 
b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c index 35c904560836..528112d5642a 100644 --- a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c +++ b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c @@ -1170,6 +1170,15 @@ svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg, memset(rpchdr, 0, sizeof(rpchdr)); + oa = &msg->rm_call.cb_cred; + + if (oa->oa_length > sizeof(rpchdr) - 8 * BYTES_PER_XDR_UNIT) { + rpc_gss_log_debug("auth length %d exceeds maximum", + oa->oa_length); + client->cl_state = CLIENT_STALE; + return (FALSE); + } + /* Reconstruct RPC header for signing (from xdr_callmsg). */ buf = rpchdr; IXDR_PUT_LONG(buf, msg->rm_xid); @@ -1178,7 +1187,6 @@ svc_rpc_gss_validate(struct svc_rpc_gss_client *client, struct rpc_msg *msg, IXDR_PUT_LONG(buf, msg->rm_call.cb_prog); IXDR_PUT_LONG(buf, msg->rm_call.cb_vers); IXDR_PUT_LONG(buf, msg->rm_call.cb_proc); - oa = &msg->rm_call.cb_cred; IXDR_PUT_ENUM(buf, oa->oa_flavor); IXDR_PUT_LONG(buf, oa->oa_length); if (oa->oa_length) { diff --git a/sys/security/mac_do/mac_do.c b/sys/security/mac_do/mac_do.c index 2bcff7bba973..ba49da22ce67 100644 --- a/sys/security/mac_do/mac_do.c +++ b/sys/security/mac_do/mac_do.c @@ -1153,13 +1153,14 @@ remove_rules(struct prison *const pr) prison_lock(pr); /* - * We go to the burden of extracting rules first instead of just letting - * osd_jail_del() calling dealloc_jail_osd() as we want to decrement - * their use count, and possibly free them, outside of the prison lock. + * We burden ourselves with extracting rules first instead of just + * letting osd_jail_del() call dealloc_jail_osd() as we want to + * decrement their use count, and possibly free them, outside of the + * prison lock. */ old_rules = osd_jail_get(pr, osd_jail_slot); error = osd_jail_set(pr, osd_jail_slot, NULL); - /* osd_set() never fails nor allocate memory when 'value' is NULL. */ + /* osd_set() never allocates memory when 'value' is NULL, nor fails. 
*/ MPASS(error == 0); /* * This completely frees the OSD slot, but doesn't call the destructor diff --git a/sys/sys/filedesc.h b/sys/sys/filedesc.h index 4817855443af..c6499a18b884 100644 --- a/sys/sys/filedesc.h +++ b/sys/sys/filedesc.h @@ -212,6 +212,8 @@ struct filedesc_to_leader { #ifdef _KERNEL +#include <machine/atomic.h> + /* Operation types for kern_dup(). */ enum { FDDUP_NORMAL, /* dup() behavior. */ @@ -303,6 +305,21 @@ int fget_only_user(struct filedesc *fdp, int fd, MPASS(refcount_load(&fp->f_count) > 0); \ }) +/* + * Look up a file description without requiring a lock. In general the result + * may be immediately invalidated after the function returns, the caller must + * handle this. + */ +static inline struct file * +fget_noref_unlocked(struct filedesc *fdp, int fd) +{ + if (__predict_false( + (u_int)fd >= (u_int)atomic_load_int(&fdp->fd_nfiles))) + return (NULL); + + return (atomic_load_ptr(&fdp->fd_ofiles[fd].fde_file)); +} + /* Requires a FILEDESC_{S,X}LOCK held and returns without a ref. 
*/ static __inline struct file * fget_noref(struct filedesc *fdp, int fd) diff --git a/sys/sys/pmc.h b/sys/sys/pmc.h index 4c160c000dab..21dbf48e976b 100644 --- a/sys/sys/pmc.h +++ b/sys/sys/pmc.h @@ -110,6 +110,7 @@ extern char pmc_cpuid[PMC_CPUID_LEN]; __PMC_CPU(INTEL_EMERALD_RAPIDS, 0xA0, "Intel Emerald Rapids") \ __PMC_CPU(INTEL_ALDERLAKEN, 0xA1, "Intel AlderlakeN") \ __PMC_CPU(INTEL_GRANITE_RAPIDS, 0xA2, "Intel Granite Rapids") \ + __PMC_CPU(INTEL_METEOR_LAKE, 0xA3, "Intel Meteorlake") \ __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \ __PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \ __PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \ @@ -661,7 +662,9 @@ struct pmc_op_caps { #define PMC_HASH_SIZE 1024 #define PMC_MTXPOOL_SIZE 2048 #define PMC_LOG_BUFFER_SIZE 256 +#define PMC_LOG_BUFFER_SIZE_MAX (16 * 1024) #define PMC_NLOGBUFFERS_PCPU 32 +#define PMC_NLOGBUFFERS_PCPU_MEM_MAX (32 * 1024) #define PMC_NSAMPLES 256 #define PMC_CALLCHAIN_DEPTH 128 #define PMC_THREADLIST_MAX 128 diff --git a/sys/tools/sound/emu10k1-mkalsa.sh b/sys/tools/sound/emu10k1-mkalsa.sh deleted file mode 100644 index c6a2ba462560..000000000000 --- a/sys/tools/sound/emu10k1-mkalsa.sh +++ /dev/null @@ -1,20 +0,0 @@ - -GREP=${GREP:-grep} -CC=${CC:-cc} -AWK=${AWK:-awk} -MV=${MV:=mv} -RM=${RM:=rm} -IN=$1 -OUT=$2 - -trap "${RM} -f $OUT.tmp" EXIT - -$GREP -v '#include' $IN | \ -$CC -E -D__KERNEL__ -dM - | \ -$AWK -F"[ (]" ' -/define/ { - print "#ifndef " $2; - print; - print "#endif"; -}' > $OUT.tmp -${MV} -f $OUT.tmp $OUT diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index 88438320a17a..125311912c20 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -645,6 +645,8 @@ vm_fault_populate(struct faultstate *fs) pager_last = map_last; } for (pidx = pager_first; pidx <= pager_last; pidx += npages) { + bool writeable; + m = vm_page_lookup(fs->first_object, pidx); vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset; KASSERT(m != NULL && m->pindex == pidx, @@ -655,14 +657,28 @@ 
vm_fault_populate(struct faultstate *fs) !pmap_ps_enabled(fs->map->pmap))) psind--; + writeable = (fs->prot & VM_PROT_WRITE) != 0; npages = atop(pagesizes[psind]); for (i = 0; i < npages; i++) { vm_fault_populate_check_page(&m[i]); vm_fault_dirty(fs, &m[i]); + + /* + * If this is a writeable superpage mapping, all + * constituent pages and the new mapping should be + * dirty, otherwise the mapping should be read-only. + */ + if (writeable && psind > 0 && + (m[i].oflags & VPO_UNMANAGED) == 0 && + m[i].dirty != VM_PAGE_BITS_ALL) + writeable = false; } + if (psind > 0 && writeable) + fs->fault_type |= VM_PROT_WRITE; VM_OBJECT_WUNLOCK(fs->first_object); - rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type | - (fs->wired ? PMAP_ENTER_WIRED : 0), psind); + rv = pmap_enter(fs->map->pmap, vaddr, m, + fs->prot & ~(writeable ? 0 : VM_PROT_WRITE), + fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0), psind); /* * pmap_enter() may fail for a superpage mapping if additional diff --git a/sys/x86/conf/NOTES b/sys/x86/conf/NOTES index 877cbb3beb7f..3ae80fbe45c5 100644 --- a/sys/x86/conf/NOTES +++ b/sys/x86/conf/NOTES @@ -390,6 +390,9 @@ device vmd # PMC-Sierra SAS/SATA controller device pmspcv +device ocs_fc # Emulex FC adapters +device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller + # # Standard floppy disk controllers and floppy tapes, supports # the Y-E DATA External FDD (PC Card) @@ -442,6 +445,7 @@ device cpufreq # wpi: Intel 3945ABG Wireless LAN controller # Requires the wpi firmware module +device aq # Aquantia / Marvell AQC1xx device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE device gve # Google Virtual NIC (gVNIC) device igc # Intel I225 2.5G Ethernet @@ -455,6 +459,7 @@ device mlx4 # Shared code module between IB and Ethernet device mlx4ib # Mellanox ConnectX HCA InfiniBand device mlx4en # Mellanox ConnectX HCA Ethernet device nfe # nVidia nForce MCP on-board Ethernet +device vge # VIA VT612x gigabit Ethernet device vmx # VMware VMXNET3 
Ethernet device wpi # Intel 3945ABG wireless NICs. diff --git a/sys/x86/cpufreq/hwpstate_amd.c b/sys/x86/cpufreq/hwpstate_amd.c index 2649fcc5779f..ccf13f06a6d1 100644 --- a/sys/x86/cpufreq/hwpstate_amd.c +++ b/sys/x86/cpufreq/hwpstate_amd.c @@ -152,6 +152,13 @@ struct hwpstate_setting { #define HWPFL_USE_CPPC (1 << 0) #define HWPFL_CPPC_REQUEST_NOT_READ (1 << 1) +struct hwpstate_cpufreq_methods { + int (*get)(device_t dev, struct cf_setting *cf); + int (*set)(device_t dev, const struct cf_setting *cf); + int (*settings)(device_t dev, struct cf_setting *sets, int *count); + int (*type)(device_t dev, int *type); +}; + /* * Atomicity is achieved by only modifying a given softc on its associated CPU * and with interrupts disabled. @@ -161,6 +168,7 @@ struct hwpstate_setting { struct hwpstate_softc { device_t dev; u_int flags; + const struct hwpstate_cpufreq_methods *cpufreq_methods; union { struct { struct hwpstate_setting @@ -620,80 +628,105 @@ hwpstate_goto_pstate(device_t dev, int id) } static int -hwpstate_set(device_t dev, const struct cf_setting *cf) +hwpstate_set_cppc(device_t dev __unused, const struct cf_setting *cf __unused) +{ + return (EOPNOTSUPP); +} + +static int +hwpstate_set_pstate(device_t dev, const struct cf_setting *cf) { struct hwpstate_softc *sc; struct hwpstate_setting *set; int i; - if (cf == NULL) - return (EINVAL); sc = device_get_softc(dev); - if ((sc->flags & HWPFL_USE_CPPC) != 0) - return (EOPNOTSUPP); set = sc->hwpstate_settings; for (i = 0; i < sc->cfnum; i++) if (CPUFREQ_CMP(cf->freq, set[i].freq)) break; if (i == sc->cfnum) return (EINVAL); - return (hwpstate_goto_pstate(dev, set[i].pstate_id)); } static int -hwpstate_get(device_t dev, struct cf_setting *cf) +hwpstate_set(device_t dev, const struct cf_setting *cf) +{ + struct hwpstate_softc *sc = device_get_softc(dev); + + if (cf == NULL) + return (EINVAL); + return (sc->cpufreq_methods->set(dev, cf)); +} + +static int +hwpstate_get_cppc(device_t dev, struct cf_setting *cf) { - struct 
hwpstate_softc *sc; - struct hwpstate_setting set; struct pcpu *pc; - uint64_t msr; uint64_t rate; int ret; + pc = cpu_get_pcpu(dev); + if (pc == NULL) + return (ENXIO); + + memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf)); + cf->dev = dev; + if ((ret = cpu_est_clockrate(pc->pc_cpuid, &rate))) + return (ret); + cf->freq = rate / 1000000; + return (0); +} + +static int +hwpstate_get_pstate(device_t dev, struct cf_setting *cf) +{ + struct hwpstate_softc *sc; + struct hwpstate_setting set; + uint64_t msr; + sc = device_get_softc(dev); - if (cf == NULL) + msr = rdmsr(MSR_AMD_10H_11H_STATUS); + if (msr >= sc->cfnum) return (EINVAL); + set = sc->hwpstate_settings[msr]; - if ((sc->flags & HWPFL_USE_CPPC) != 0) { - pc = cpu_get_pcpu(dev); - if (pc == NULL) - return (ENXIO); - - memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf)); - cf->dev = dev; - if ((ret = cpu_est_clockrate(pc->pc_cpuid, &rate))) - return (ret); - cf->freq = rate / 1000000; - } else { - msr = rdmsr(MSR_AMD_10H_11H_STATUS); - if (msr >= sc->cfnum) - return (EINVAL); - set = sc->hwpstate_settings[msr]; - - cf->freq = set.freq; - cf->volts = set.volts; - cf->power = set.power; - cf->lat = set.lat; - cf->dev = dev; - } + cf->freq = set.freq; + cf->volts = set.volts; + cf->power = set.power; + cf->lat = set.lat; + cf->dev = dev; return (0); } static int -hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) +hwpstate_get(device_t dev, struct cf_setting *cf) { struct hwpstate_softc *sc; + + sc = device_get_softc(dev); + if (cf == NULL) + return (EINVAL); + return (sc->cpufreq_methods->get(dev, cf)); +} + +static int +hwpstate_settings_cppc(device_t dev __unused, struct cf_setting *sets __unused, + int *count __unused) +{ + return (EOPNOTSUPP); +} + +static int +hwpstate_settings_pstate(device_t dev, struct cf_setting *sets, int *count) +{ struct hwpstate_setting set; + struct hwpstate_softc *sc; int i; - if (sets == NULL || count == NULL) - return (EINVAL); sc = device_get_softc(dev); - if ((sc->flags & 
HWPFL_USE_CPPC) != 0) - return (EOPNOTSUPP); - if (*count < sc->cfnum) return (E2BIG); for (i = 0; i < sc->cfnum; i++, sets++) { @@ -710,21 +743,40 @@ hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) } static int -hwpstate_type(device_t dev, int *type) +hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) { struct hwpstate_softc *sc; - if (type == NULL) + if (sets == NULL || count == NULL) return (EINVAL); sc = device_get_softc(dev); + return (sc->cpufreq_methods->settings(dev, sets, count)); +} +static int +hwpstate_type_cppc(device_t dev, int *type) +{ + *type |= CPUFREQ_TYPE_ABSOLUTE | CPUFREQ_FLAG_INFO_ONLY | + CPUFREQ_FLAG_UNCACHED; + return (0); +} + +static int +hwpstate_type_pstate(device_t dev, int *type) +{ *type = CPUFREQ_TYPE_ABSOLUTE; - *type |= (sc->flags & HWPFL_USE_CPPC) != 0 ? - CPUFREQ_FLAG_INFO_ONLY | CPUFREQ_FLAG_UNCACHED : - 0; return (0); } +static int +hwpstate_type(device_t dev, int *type) +{ + struct hwpstate_softc *sc; + + sc = device_get_softc(dev); + return (sc->cpufreq_methods->type(dev, type)); +} + static void hwpstate_identify(driver_t *driver, device_t parent) { @@ -909,34 +961,14 @@ enable_cppc(struct hwpstate_softc *sc) } static int -hwpstate_probe(device_t dev) +hwpstate_probe_pstate(device_t dev) { struct hwpstate_softc *sc; device_t perf_dev; - uint64_t msr; int error, type; + uint64_t msr; sc = device_get_softc(dev); - - if (hwpstate_amd_cppc_enable && - (amd_extended_feature_extensions & AMDFEID_CPPC)) { - sc->flags |= HWPFL_USE_CPPC; - device_set_desc(dev, - "AMD Collaborative Processor Performance Control (CPPC)"); - } else { - /* - * No CPPC support. Only keep hwpstate0, it goes well with - * acpi_throttle. - */ - if (device_get_unit(dev) != 0) - return (ENXIO); - device_set_desc(dev, "Cool`n'Quiet 2.0"); - } - - sc->dev = dev; - if ((sc->flags & HWPFL_USE_CPPC) != 0) - return (0); - /* * Check if acpi_perf has INFO only flag. 
*/ @@ -984,10 +1016,49 @@ hwpstate_probe(device_t dev) */ if (error) error = hwpstate_get_info_from_msr(dev); - if (error) - return (error); + return (error); +} - return (0); +static const struct hwpstate_cpufreq_methods cppc_methods = { + .get = hwpstate_get_cppc, + .set = hwpstate_set_cppc, + .settings = hwpstate_settings_cppc, + .type = hwpstate_type_cppc }; + +static const struct hwpstate_cpufreq_methods pstate_methods = { + .get = hwpstate_get_pstate, + .set = hwpstate_set_pstate, + .settings = hwpstate_settings_pstate, + .type = hwpstate_type_pstate }; + +static int +hwpstate_probe(device_t dev) +{ + struct hwpstate_softc *sc; + sc = device_get_softc(dev); + + if (hwpstate_amd_cppc_enable && + (amd_extended_feature_extensions & AMDFEID_CPPC)) { + sc->flags |= HWPFL_USE_CPPC; + device_set_desc(dev, + "AMD Collaborative Processor Performance Control (CPPC)"); + } else { + /* + * No CPPC support. Only keep hwpstate0, it goes well with + * acpi_throttle. + */ + if (device_get_unit(dev) != 0) + return (ENXIO); + device_set_desc(dev, "Cool`n'Quiet 2.0"); + } + + sc->dev = dev; + if ((sc->flags & HWPFL_USE_CPPC) != 0) { + sc->cpufreq_methods = &cppc_methods; + return (0); + } + sc->cpufreq_methods = &pstate_methods; + return (hwpstate_probe_pstate(dev)); } static int @@ -1037,8 +1108,8 @@ hwpstate_attach(device_t dev) SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "desired_performance", - CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, - dev, AMD_CPPC_REQUEST_DES_PERF_BITS, + CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, dev, + AMD_CPPC_REQUEST_DES_PERF_BITS, sysctl_cppc_request_field_handler, "IU", "Desired performance level (from 0 to 255; " "0 enables autonomous mode, otherwise value should be " diff --git a/sys/x86/include/apicvar.h b/sys/x86/include/apicvar.h index 551f5527ac00..2dda3103f93c 100644 --- a/sys/x86/include/apicvar.h +++ b/sys/x86/include/apicvar.h @@ -84,6 +84,7 @@ * to use that ID. 
*/ #define IOAPIC_MAX_ID 0xff +#define IOAPIC_MAX_EXT_ID 0x7fff /* I/O Interrupts are used for external devices such as ISA, PCI, etc. */ #define APIC_IO_INTS (IDT_IO_INTS + 16) @@ -265,6 +266,7 @@ device_t ioapic_get_dev(u_int apic_id); extern int x2apic_mode; extern int lapic_eoi_suppression; +extern int apic_ext_dest_id; #ifdef _SYS_SYSCTL_H_ SYSCTL_DECL(_hw_apic); diff --git a/sys/x86/include/bhyve.h b/sys/x86/include/bhyve.h new file mode 100644 index 000000000000..215bee90bd1a --- /dev/null +++ b/sys/x86/include/bhyve.h @@ -0,0 +1,35 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright © 2025 Amazon.com, Inc. or its affiliates. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _X86_BHYVE_H_ +#define _X86_BHYVE_H_ + +/* Bhyve feature detection */ +#define CPUID_BHYVE_FEATURES 0x40000001 +#define CPUID_BHYVE_FEAT_EXT_DEST_ID (1UL << 0) /* MSI Extended Dest ID */ + +#endif diff --git a/sys/x86/include/frame.h b/sys/x86/include/frame.h index a6444d55cfaf..b8e090ff95d0 100644 --- a/sys/x86/include/frame.h +++ b/sys/x86/include/frame.h @@ -152,6 +152,10 @@ struct trapframe { uint16_t tf_ss; uint16_t tf_fred_evinfo1; uint32_t tf_fred_evinfo2; +}; + +struct trapframe_fred { + struct trapframe tf_idt; /* two long words added by FRED */ uint64_t tf_fred_evdata; uint64_t tf_fred_zero1; diff --git a/sys/x86/include/kvm.h b/sys/x86/include/kvm.h index fef26bde226a..83dd20fa8d23 100644 --- a/sys/x86/include/kvm.h +++ b/sys/x86/include/kvm.h @@ -49,6 +49,7 @@ #define KVM_FEATURE_CLOCKSOURCE 0x00000001 #define KVM_FEATURE_CLOCKSOURCE2 0x00000008 +#define KVM_FEATURE_MSI_EXT_DEST_ID 0x00008000 #define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 0x01000000 /* Deprecated: for the CLOCKSOURCE feature. 
*/ diff --git a/sys/x86/isa/atpic.c b/sys/x86/isa/atpic.c index 56566ef8d64d..0c884c5fc484 100644 --- a/sys/x86/isa/atpic.c +++ b/sys/x86/isa/atpic.c @@ -68,8 +68,6 @@ #define IMEN_MASK(ai) (IRQ_MASK((ai)->at_irq)) -#define NUM_ISA_IRQS 16 - static void atpic_init(void *dummy); inthand_t diff --git a/sys/x86/isa/icu.h b/sys/x86/isa/icu.h index ae7303c0e36e..35734750b3b1 100644 --- a/sys/x86/isa/icu.h +++ b/sys/x86/isa/icu.h @@ -71,4 +71,6 @@ void atpic_handle_intr(u_int vector, struct trapframe *frame); void atpic_startup(void); +#define NUM_ISA_IRQS 16 + #endif /* !_X86_ISA_ICU_H_ */ diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c index 5483fbd6dd4e..422bdf9cfb0d 100644 --- a/sys/x86/x86/cpu_machdep.c +++ b/sys/x86/x86/cpu_machdep.c @@ -423,8 +423,9 @@ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { uint64_t tsc1, tsc2; - uint64_t acnt, mcnt, perf; + uint64_t acnt_start, acnt_end, mcnt_start, mcnt_end, perf; register_t reg; + int error = 0; if (pcpu_find(cpu_id) == NULL || rate == NULL) return (EINVAL); @@ -452,15 +453,20 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate) /* Calibrate by measuring a short delay. 
*/ reg = intr_disable(); if (tsc_is_invariant) { - wrmsr(MSR_MPERF, 0); - wrmsr(MSR_APERF, 0); + mcnt_start = rdmsr(MSR_MPERF); + acnt_start = rdmsr(MSR_APERF); tsc1 = rdtsc(); DELAY(1000); - mcnt = rdmsr(MSR_MPERF); - acnt = rdmsr(MSR_APERF); + mcnt_end = rdmsr(MSR_MPERF); + acnt_end = rdmsr(MSR_APERF); tsc2 = rdtsc(); intr_restore(reg); - perf = 1000 * acnt / mcnt; + if (mcnt_end == mcnt_start) { + tsc_perf_stat = 0; + error = EOPNOTSUPP; + goto err; + } + perf = 1000 * (acnt_end - acnt_start) / (mcnt_end - mcnt_start); *rate = (tsc2 - tsc1) * perf; } else { tsc1 = rdtsc(); @@ -470,6 +476,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate) *rate = (tsc2 - tsc1) * 1000; } +err: #ifdef SMP if (smp_cpus > 1) { thread_lock(curthread); @@ -478,7 +485,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate) } #endif - return (0); + return (error); } /* diff --git a/sys/x86/x86/io_apic.c b/sys/x86/x86/io_apic.c index d0f47f82011a..28841cfc0e21 100644 --- a/sys/x86/x86/io_apic.c +++ b/sys/x86/x86/io_apic.c @@ -85,6 +85,7 @@ struct ioapic_intsrc { u_int io_activehi:1; u_int io_edgetrigger:1; u_int io_masked:1; + u_int io_valid:1; int io_bus:4; uint32_t io_lowreg; u_int io_remap_cookie; @@ -193,7 +194,7 @@ _ioapic_eoi_source(struct intsrc *isrc, int locked) ioapic_write(io->io_addr, IOAPIC_REDTBL_LO(src->io_intpin), low1); low1 = src->io_lowreg; - if (src->io_masked != 0) + if (src->io_masked != 0 || src->io_valid == 0) low1 |= IOART_INTMSET; ioapic_write(io->io_addr, IOAPIC_REDTBL_LO(src->io_intpin), low1); @@ -268,7 +269,9 @@ ioapic_enable_source(struct intsrc *isrc) mtx_lock_spin(&icu_lock); if (intpin->io_masked) { - flags = intpin->io_lowreg & ~IOART_INTMASK; + flags = intpin->io_lowreg; + if (intpin->io_valid) + flags &= ~IOART_INTMASK; ioapic_write(io->io_addr, IOAPIC_REDTBL_LO(intpin->io_intpin), flags); intpin->io_masked = 0; @@ -361,10 +364,27 @@ ioapic_program_intpin(struct ioapic_intsrc *intpin) /* * Set the destination. 
Note that with Intel interrupt remapping, * the previously reserved bits 55:48 now have a purpose so ensure - * these are zero. + * these are zero. If the CPU number (in fact, APIC ID) is too + * large, mark the interrupt as invalid, and target CPU #0. */ - low = IOART_DESTPHY; - high = intpin->io_cpu << APIC_ID_SHIFT; + if (intpin->io_cpu <= IOAPIC_MAX_ID) { + low = IOART_DESTPHY; + high = intpin->io_cpu << APIC_ID_SHIFT; + intpin->io_valid = 1; + } else if (intpin->io_cpu <= IOAPIC_MAX_EXT_ID && + apic_ext_dest_id == 1) { + low = IOART_DESTPHY; + high = intpin->io_cpu << APIC_ID_SHIFT & APIC_ID_MASK; + high |= (intpin->io_cpu >> 8) << APIC_EXT_ID_SHIFT + & APIC_EXT_ID_MASK; + intpin->io_valid = 1; + } else { + printf("%s: unsupported destination APIC ID %u for pin %u\n", + __func__, intpin->io_cpu, intpin->io_intpin); + low = IOART_DESTPHY; + high = 0 << APIC_ID_SHIFT; + intpin->io_valid = 0; + } /* Program the rest of the low word. */ if (intpin->io_edgetrigger) @@ -375,7 +395,7 @@ ioapic_program_intpin(struct ioapic_intsrc *intpin) low |= IOART_INTAHI; else low |= IOART_INTALO; - if (intpin->io_masked) + if (intpin->io_masked || !intpin->io_valid) low |= IOART_INTMSET; switch (intpin->io_irq) { case IRQ_EXTINT: @@ -697,11 +717,13 @@ ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase) intpin->io_activehi = 1; intpin->io_edgetrigger = 1; intpin->io_masked = 1; + intpin->io_valid = 1; } else { intpin->io_bus = APIC_BUS_PCI; intpin->io_activehi = 0; intpin->io_edgetrigger = 0; intpin->io_masked = 1; + intpin->io_valid = 1; } /* @@ -796,6 +818,7 @@ ioapic_set_nmi(ioapic_drv_t io, u_int pin) io->io_pins[pin].io_bus = APIC_BUS_UNKNOWN; io->io_pins[pin].io_irq = IRQ_NMI; io->io_pins[pin].io_masked = 0; + io->io_pins[pin].io_valid = 1; io->io_pins[pin].io_edgetrigger = 1; io->io_pins[pin].io_activehi = 1; if (bootverbose) @@ -817,6 +840,7 @@ ioapic_set_smi(ioapic_drv_t io, u_int pin) io->io_pins[pin].io_bus = APIC_BUS_UNKNOWN; io->io_pins[pin].io_irq = IRQ_SMI; 
io->io_pins[pin].io_masked = 0; + io->io_pins[pin].io_valid = 1; io->io_pins[pin].io_edgetrigger = 1; io->io_pins[pin].io_activehi = 1; if (bootverbose) @@ -841,6 +865,7 @@ ioapic_set_extint(ioapic_drv_t io, u_int pin) io->io_pins[pin].io_masked = 0; else io->io_pins[pin].io_masked = 1; + io->io_pins[pin].io_valid = 1; io->io_pins[pin].io_edgetrigger = 1; io->io_pins[pin].io_activehi = 1; if (bootverbose) diff --git a/sys/x86/x86/local_apic.c b/sys/x86/x86/local_apic.c index 8a8fb8ef41f6..cd5e4d474080 100644 --- a/sys/x86/x86/local_apic.c +++ b/sys/x86/x86/local_apic.c @@ -72,6 +72,10 @@ #include <machine/smp.h> #include <machine/specialreg.h> #include <x86/init.h> +#include <x86/kvm.h> +#include <contrib/xen/arch-x86/cpuid.h> +#include <x86/bhyve.h> +#include <dev/hyperv/vmbus/x86/hyperv_reg.h> #ifdef DDB #include <sys/interrupt.h> @@ -1445,6 +1449,9 @@ lapic_handle_intr(int vector, struct trapframe *frame) isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id), vector)); + KASSERT(isrc != NULL, + ("lapic_handle_intr: vector %d unrecognized at lapic %u", + vector, PCPU_GET(apic_id))); intr_execute_handlers(isrc, frame); } @@ -2083,6 +2090,47 @@ apic_setup_local(void *dummy __unused) } SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL); +/* Are we in a VM which supports the Extended Destination ID standard? */ +int apic_ext_dest_id = -1; +SYSCTL_INT(_machdep, OID_AUTO, apic_ext_dest_id, CTLFLAG_RDTUN, &apic_ext_dest_id, 0, + "Use APIC Extended Destination IDs"); + +/* Detect support for Extended Destination IDs. */ +static void +detect_extended_dest_id(void) +{ + u_int regs[4]; + + /* Check if we support extended destination IDs. 
*/ + switch (vm_guest) { + case VM_GUEST_XEN: + cpuid_count(hv_base + 4, 0, regs); + if (regs[0] & XEN_HVM_CPUID_EXT_DEST_ID) + apic_ext_dest_id = 1; + break; + case VM_GUEST_HV: + cpuid_count(CPUID_LEAF_HV_STACK_INTERFACE, 0, regs); + if (regs[0] != HYPERV_STACK_INTERFACE_EAX_SIG) + break; + cpuid_count(CPUID_LEAF_HV_STACK_PROPERTIES, 0, regs); + if (regs[0] & HYPERV_PROPERTIES_EXT_DEST_ID) + apic_ext_dest_id = 1; + break; + case VM_GUEST_KVM: + kvm_cpuid_get_features(regs); + if (regs[0] & KVM_FEATURE_MSI_EXT_DEST_ID) + apic_ext_dest_id = 1; + break; + case VM_GUEST_BHYVE: + if (hv_high < CPUID_BHYVE_FEATURES) + break; + cpuid_count(CPUID_BHYVE_FEATURES, 0, regs); + if (regs[0] & CPUID_BHYVE_FEAT_EXT_DEST_ID) + apic_ext_dest_id = 1; + break; + } +} + /* * Setup the I/O APICs. */ @@ -2094,6 +2142,10 @@ apic_setup_io(void *dummy __unused) if (best_enum == NULL) return; + /* Check hypervisor support for extended destination IDs. */ + if (apic_ext_dest_id == -1) + detect_extended_dest_id(); + /* * Local APIC must be registered before other PICs and pseudo PICs * for proper suspend/resume order. 
diff --git a/sys/x86/x86/msi.c b/sys/x86/x86/msi.c index b38247bf6e45..9d5409015de7 100644 --- a/sys/x86/x86/msi.c +++ b/sys/x86/x86/msi.c @@ -92,6 +92,10 @@ #define INTEL_ADDR(msi) \ (MSI_INTEL_ADDR_BASE | (msi)->msi_cpu << 12 | \ MSI_INTEL_ADDR_RH_OFF | MSI_INTEL_ADDR_DM_PHYSICAL) +#define INTEL_ADDR_EXT(msi) \ + (MSI_INTEL_ADDR_BASE | ((msi)->msi_cpu & 0xff) << 12 | \ + ((msi)->msi_cpu & 0x7f00) >> 3 | \ + MSI_INTEL_ADDR_RH_OFF | MSI_INTEL_ADDR_DM_PHYSICAL) #define INTEL_DATA(msi) \ (MSI_INTEL_DATA_TRGREDG | MSI_INTEL_DATA_DELFIXED | (msi)->msi_vector) @@ -652,13 +656,16 @@ msi_map(int irq, uint64_t *addr, uint32_t *data) mtx_unlock(&msi_lock); error = EOPNOTSUPP; #endif - if (error == EOPNOTSUPP && msi->msi_cpu > 0xff) { + if (error == EOPNOTSUPP && + (msi->msi_cpu > 0x7fff || + (msi->msi_cpu > 0xff && apic_ext_dest_id != 1))) { printf("%s: unsupported destination APIC ID %u\n", __func__, msi->msi_cpu); error = EINVAL; } if (error == EOPNOTSUPP) { - *addr = INTEL_ADDR(msi); + *addr = (apic_ext_dest_id == 1) ? + INTEL_ADDR_EXT(msi) : INTEL_ADDR(msi); *data = INTEL_DATA(msi); error = 0; } diff --git a/sys/x86/x86/tsc.c b/sys/x86/x86/tsc.c index 3b873d9dae73..f88ce60c6319 100644 --- a/sys/x86/x86/tsc.c +++ b/sys/x86/x86/tsc.c @@ -433,6 +433,8 @@ probe_tsc_freq_late(void) void start_TSC(void) { + uint64_t mperf, aperf; + if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) return; @@ -442,11 +444,12 @@ start_TSC(void) /* * XXX Some emulators expose host CPUID without actual support * for these MSRs. We must test whether they really work. + * They may also be read-only, so test for increment. 
*/ - wrmsr(MSR_MPERF, 0); - wrmsr(MSR_APERF, 0); + mperf = rdmsr(MSR_MPERF); + aperf = rdmsr(MSR_APERF); DELAY(10); - if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0) + if (rdmsr(MSR_MPERF) != mperf && rdmsr(MSR_APERF) != aperf) tsc_perf_stat = 1; } diff --git a/tests/sys/kern/Makefile b/tests/sys/kern/Makefile index 0e505d6cb51a..e65b4812f90b 100644 --- a/tests/sys/kern/Makefile +++ b/tests/sys/kern/Makefile @@ -54,7 +54,6 @@ PLAIN_TESTS_C+= subr_unit_test ATF_TESTS_C+= sysctl_kern_proc ATF_TESTS_C+= sys_getrandom ATF_TESTS_C+= timerfd -CFLAGS.timerfd+= -I${.CURDIR} LIBADD.timerfd+= pthread ATF_TESTS_C+= tty_pts ATF_TESTS_C+= unix_dgram diff --git a/tests/sys/kern/timerfd.c b/tests/sys/kern/timerfd.c index b24d093b346e..277516c3d9e2 100644 --- a/tests/sys/kern/timerfd.c +++ b/tests/sys/kern/timerfd.c @@ -25,11 +25,11 @@ #include <atf-c.h> #include <sys/types.h> - #include <sys/event.h> #include <sys/param.h> #include <sys/select.h> #include <sys/time.h> +#include <sys/timerfd.h> #include <errno.h> #include <signal.h> @@ -44,8 +44,6 @@ #include <time.h> #include <unistd.h> -#include <sys/timerfd.h> - /* Time in ns that sleeps are allowed to take longer for in unit tests. 
*/ #define TIMER_SLACK (90000000) @@ -739,6 +737,8 @@ ATF_TC_BODY(timerfd__periodic_timer_performance, tc) uint64_t timeouts; ATF_REQUIRE(read(timerfd, &timeouts, sizeof(timeouts)) == (ssize_t)sizeof(timeouts)); + if (timeouts < 400000000) + atf_tc_expect_fail("https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=294053"); ATF_REQUIRE_MSG(timeouts >= 400000000, "%ld", (long)timeouts); ATF_REQUIRE(close(timerfd) == 0); diff --git a/tests/sys/netinet/Makefile b/tests/sys/netinet/Makefile index 564eabae3313..42906aa2dd93 100644 --- a/tests/sys/netinet/Makefile +++ b/tests/sys/netinet/Makefile @@ -13,7 +13,7 @@ ATF_TESTS_C= broadcast \ so_reuseport_lb_test \ socket_afinet \ tcp_connect_port_test \ - tcp_implied_connect \ + tcp_socket \ tcp_md5_getsockopt \ udp_bindings \ udp_io diff --git a/tests/sys/netinet/output.sh b/tests/sys/netinet/output.sh index 23d427605878..d6b3ccbe607c 100755 --- a/tests/sys/netinet/output.sh +++ b/tests/sys/netinet/output.sh @@ -223,7 +223,7 @@ output_raw_success_cleanup() mpath_check() { if [ `sysctl -iW net.route.multipath | wc -l` != "1" ]; then - atf_skip "This test requires ROUTE_MPATH enabled" + atf_skip "This test requires multipath enabled" fi } diff --git a/tests/sys/netinet/raw.c b/tests/sys/netinet/raw.c index 9c25b3ddbec2..d3feb315d5c4 100644 --- a/tests/sys/netinet/raw.c +++ b/tests/sys/netinet/raw.c @@ -36,6 +36,42 @@ #include <atf-c.h> +#define PROT1 253 /* RFC3692 */ +#define PROT2 254 /* RFC3692 */ +#define ADDR1 { htonl(0xc0000202) } /* RFC5737 */ +#define ADDR2 { htonl(0xc0000203) } /* RFC5737 */ +#define WILD { htonl(INADDR_ANY) } +#define LOOP(x) { htonl(INADDR_LOOPBACK + (x)) } +#define MULT(x) { htonl(INADDR_UNSPEC_GROUP + (x)) } + +static int +rawsender(bool mcast) +{ + int s; + + ATF_REQUIRE((s = socket(PF_INET, SOCK_RAW, 0)) != -1); + ATF_REQUIRE(setsockopt(s, IPPROTO_IP, IP_HDRINCL, &(int){1}, + sizeof(int)) == 0); + /* + * Make sending socket connected. 
The socket API requires connected + * status to use send(2), even with IP_HDRINCL. + */ + ATF_REQUIRE(connect(s, + (struct sockaddr *)&(struct sockaddr_in){ + .sin_family = AF_INET, + .sin_len = sizeof(struct sockaddr_in), + .sin_addr = { htonl(INADDR_ANY) }, + }, sizeof(struct sockaddr_in)) == 0); + + if (mcast) + ATF_REQUIRE(setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF, + &(struct ip_mreqn){ + .imr_ifindex = if_nametoindex("lo0"), + }, sizeof(struct ip_mreqn)) == 0); + + return (s); +} + /* * The 'input' test exercises logic of rip_input(). The best documentation * for raw socket input behavior is collected in Stevens's UNIX Network @@ -46,65 +82,56 @@ * The table tests[] describes our expectations. */ ATF_TC_WITHOUT_HEAD(input); -#define PROT1 253 /* RFC3692 */ -#define PROT2 254 /* RFC3692 */ -static const struct rcvr { - struct in_addr laddr, faddr, maddr; - uint8_t proto; -} rcvrs[] = { -#define WILD { htonl(INADDR_ANY) } -#define LOOP(x) { htonl(INADDR_LOOPBACK + (x)) } -#define MULT(x) { htonl(INADDR_UNSPEC_GROUP + (x)) } - { WILD, WILD, WILD, 0 }, - { WILD, WILD, WILD, PROT1 }, - { LOOP(0), WILD, WILD, 0 }, - { LOOP(0), WILD, WILD, PROT1 }, - { LOOP(1), WILD, WILD, 0 }, - { LOOP(1), WILD, WILD, PROT1 }, - { LOOP(0), LOOP(2), WILD, 0 }, - { LOOP(0), LOOP(2), WILD, PROT1 }, - { LOOP(0), LOOP(3), WILD, 0 }, - { LOOP(0), LOOP(3), WILD, PROT1 }, - { LOOP(1), LOOP(3), WILD, 0 }, - { LOOP(1), LOOP(3), WILD, PROT1 }, - { WILD, WILD, MULT(1), 0 }, -}; -static const struct test { - struct in_addr src, dst; - uint8_t proto; - bool results[nitems(rcvrs)]; -} tests[] = { +ATF_TC_BODY(input, tc) +{ + static const struct rcvr { + struct in_addr laddr, faddr, maddr; + uint8_t proto; + } rcvrs[] = { + { WILD, WILD, WILD, 0 }, + { WILD, WILD, WILD, PROT1 }, + { LOOP(0), WILD, WILD, 0 }, + { LOOP(0), WILD, WILD, PROT1 }, + { LOOP(1), WILD, WILD, 0 }, + { LOOP(1), WILD, WILD, PROT1 }, + { LOOP(0), LOOP(2), WILD, 0 }, + { LOOP(0), LOOP(2), WILD, PROT1 }, + { LOOP(0), LOOP(3), 
WILD, 0 }, + { LOOP(0), LOOP(3), WILD, PROT1 }, + { LOOP(1), LOOP(3), WILD, 0 }, + { LOOP(1), LOOP(3), WILD, PROT1 }, + { WILD, WILD, MULT(1), 0 }, + }; + static const struct test { + struct in_addr src, dst; + uint8_t proto; + bool results[nitems(rcvrs)]; + } tests[] = { #define x true #define o false - { LOOP(2), LOOP(0), PROT1, - { x, x, x, x, o, o, x, x, o, o, o, o, x } }, - { LOOP(2), LOOP(0), PROT2, - { x, o, x, o, o, o, x, o, o, o, o, o, x } }, - { LOOP(3), LOOP(0), PROT1, - { x, x, x, x, o, o, o, o, x, x, o, o, x } }, - { LOOP(3), LOOP(0), PROT2, - { x, o, x, o, o, o, o, o, x, o, o, o, x } }, - { LOOP(2), LOOP(1), PROT1, - { x, x, o, o, x, x, o, o, o, o, o, o, x } }, - { LOOP(2), LOOP(1), PROT2, - { x, o, o, o, x, o, o, o, o, o, o, o, x } }, - { LOOP(3), LOOP(1), PROT1, - { x, x, o, o, x, x, o, o, o, o, x, x, x } }, - { LOOP(3), LOOP(1), PROT2, - { x, o, o, o, x, o, o, o, o, o, x, o, x } }, - { LOOP(3), MULT(1), PROT1, - { x, x, o, o, o, o, o, o, o, o, o, o, x } }, - { LOOP(3), MULT(2), PROT1, - { x, x, o, o, o, o, o, o, o, o, o, o, o } }, -#undef WILD -#undef LOOP -#undef MULT + { LOOP(2), LOOP(0), PROT1, + { x, x, x, x, o, o, x, x, o, o, o, o, x } }, + { LOOP(2), LOOP(0), PROT2, + { x, o, x, o, o, o, x, o, o, o, o, o, x } }, + { LOOP(3), LOOP(0), PROT1, + { x, x, x, x, o, o, o, o, x, x, o, o, x } }, + { LOOP(3), LOOP(0), PROT2, + { x, o, x, o, o, o, o, o, x, o, o, o, x } }, + { LOOP(2), LOOP(1), PROT1, + { x, x, o, o, x, x, o, o, o, o, o, o, x } }, + { LOOP(2), LOOP(1), PROT2, + { x, o, o, o, x, o, o, o, o, o, o, o, x } }, + { LOOP(3), LOOP(1), PROT1, + { x, x, o, o, x, x, o, o, o, o, x, x, x } }, + { LOOP(3), LOOP(1), PROT2, + { x, o, o, o, x, o, o, o, o, o, x, o, x } }, + { LOOP(3), MULT(1), PROT1, + { x, x, o, o, o, o, o, o, o, o, o, o, x } }, + { LOOP(3), MULT(2), PROT1, + { x, x, o, o, o, o, o, o, o, o, o, o, o } }, #undef x #undef o -}; - -ATF_TC_BODY(input, tc) -{ + }; struct pkt { struct ip ip; char payload[100]; @@ -158,24 +185,11 @@ 
ATF_TC_BODY(input, tc) } } - ATF_REQUIRE((s = socket(PF_INET, SOCK_RAW, 0)) != -1); - ATF_REQUIRE(setsockopt(s, IPPROTO_IP, IP_HDRINCL, &(int){1}, - sizeof(int)) == 0); - /* - * Make sending socket connected. The socket API requires connected - * status to use send(2), even with IP_HDRINCL. Another side effect - * is that the sending socket won't receive own datagrams, which we - * don't drain out in this program. - */ - sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK + 100); - ATF_REQUIRE(connect(s, (struct sockaddr *)&sin, sizeof(sin)) == 0); /* * Force multicast interface for the sending socket to be able to * send to MULT(x) destinations. */ - mreqn.imr_multiaddr.s_addr = 0; - ATF_REQUIRE(setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF, &mreqn, - sizeof(mreqn)) == 0); + s = rawsender(true); for (u_int i = 0; i < nitems(tests); i++) { arc4random_buf(&pkt.payload, sizeof(pkt.payload)); @@ -211,9 +225,94 @@ ATF_TC_BODY(input, tc) } } +/* + * Test input on the same socket that changes its connection status. We send + * packets with different sources in each iteration and check results. + * Check that connect(INADDR_ANY) is effectively a disconnect and turns socket + * back to receive-all mode. 
+ */ +ATF_TC_WITHOUT_HEAD(reconnect); +ATF_TC_BODY(reconnect, tc) +{ + static const struct in_addr srcs[] = { ADDR1, ADDR2 }; + static const struct test { + struct in_addr faddr; + bool results[nitems(srcs)]; + } tests[] = { + { ADDR1, { true, false } }, + { ADDR2, { false, true } }, + { {INADDR_ANY}, { true, true } }, + }; + struct pkt { + struct ip ip; + char payload[100]; + } __packed pkt = { + .ip.ip_v = IPVERSION, + .ip.ip_hl = sizeof(struct ip) >> 2, + .ip.ip_len = htons(sizeof(struct pkt)), + .ip.ip_ttl = 16, + .ip.ip_p = PROT1, + .ip.ip_dst = LOOP(0), + }; + int r, s; + + /* XXX */ + system("/sbin/ifconfig lo0 127.0.0.1/32"); + + ATF_REQUIRE((r = socket(PF_INET, SOCK_RAW | SOCK_NONBLOCK, 0)) != -1); + s = rawsender(false); + + for (u_int i = 0; i < nitems(tests); i++) { + ATF_REQUIRE(connect(r, + (struct sockaddr *)&(struct sockaddr_in){ + .sin_family = AF_INET, + .sin_len = sizeof(struct sockaddr_in), + .sin_addr = tests[i].faddr, + }, sizeof(struct sockaddr_in)) == 0); + + for (u_int j = 0; j < nitems(srcs); j++) { + char buf[sizeof(pkt)]; + char p[2][INET_ADDRSTRLEN]; + ssize_t ss; + + arc4random_buf(&pkt.payload, sizeof(pkt.payload)); + pkt.ip.ip_src = srcs[j]; + ATF_REQUIRE(send(s, &pkt, sizeof(pkt), 0) == + sizeof(pkt)); + + /* + * The sender is a blocking socket, so we first receive + * from the sender and when this read returns we are + * guaranteed that the test socket also received the + * datagram. 
+ */ + ss = recv(s, buf, sizeof(buf), 0); + ATF_REQUIRE(ss == sizeof(buf) && + memcmp(buf + sizeof(struct ip), + pkt.payload, sizeof(pkt.payload)) == 0); + + ss = recv(r, buf, sizeof(buf), 0); + + ATF_REQUIRE_MSG((tests[i].results[j] == true && + ss == sizeof(buf) && memcmp(buf + sizeof(struct ip), + pkt.payload, sizeof(pkt.payload)) == 0) || + (tests[i].results[j] == false && ss == -1 && + errno == EAGAIN), + "test #%u src %s connect address %s unexpected " + "receive of %zd bytes errno %d", i, + inet_ntop(AF_INET, &srcs[j], p[0], + INET_ADDRSTRLEN), + inet_ntop(AF_INET, &tests[i].faddr, p[1], + INET_ADDRSTRLEN), + ss, errno); + } + } +} + ATF_TP_ADD_TCS(tp) { ATF_TP_ADD_TC(tp, input); + ATF_TP_ADD_TC(tp, reconnect); return (atf_no_error()); } diff --git a/tests/sys/netinet/tcp_implied_connect.c b/tests/sys/netinet/tcp_socket.c index d03d6be4fb92..668a3915adb7 100644 --- a/tests/sys/netinet/tcp_implied_connect.c +++ b/tests/sys/netinet/tcp_socket.c @@ -33,8 +33,8 @@ #include <atf-c.h> -ATF_TC_WITHOUT_HEAD(tcp_implied_connect); -ATF_TC_BODY(tcp_implied_connect, tc) +ATF_TC_WITHOUT_HEAD(implied_connect); +ATF_TC_BODY(implied_connect, tc) { struct sockaddr_in sin = { .sin_family = AF_INET, @@ -72,9 +72,42 @@ ATF_TC_BODY(tcp_implied_connect, tc) ATF_REQUIRE(strcmp(buf, repl) == 0); } +/* + * A disconnected TCP socket shall return the local address it used before + * it was disconnected. 
+ */ +ATF_TC_WITHOUT_HEAD(getsockname_disconnected); +ATF_TC_BODY(getsockname_disconnected, tc) +{ + struct sockaddr_in sin = { + .sin_family = AF_INET, + .sin_len = sizeof(sin), + }; + socklen_t len; + int s, c, a; + + ATF_REQUIRE(s = socket(PF_INET, SOCK_STREAM, 0)); + ATF_REQUIRE(c = socket(PF_INET, SOCK_STREAM, 0)); + + ATF_REQUIRE(bind(s, (struct sockaddr *)&sin, sizeof(sin)) == 0); + len = sizeof(sin); + ATF_REQUIRE(getsockname(s, (struct sockaddr *)&sin, &len) == 0); + sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + ATF_REQUIRE(listen(s, -1) == 0); + ATF_REQUIRE(connect(c, (struct sockaddr *)&sin, sizeof(sin)) == 0); + ATF_REQUIRE((a = accept(s, NULL, NULL)) != 1); + ATF_REQUIRE(close(a) == 0); + ATF_REQUIRE(getsockname(c, (struct sockaddr *)&sin, &len) == 0); + ATF_REQUIRE(sin.sin_addr.s_addr == htonl(INADDR_LOOPBACK)); + + close(c); + close(s); +} + ATF_TP_ADD_TCS(tp) { - ATF_TP_ADD_TC(tp, tcp_implied_connect); + ATF_TP_ADD_TC(tp, implied_connect); + ATF_TP_ADD_TC(tp, getsockname_disconnected); return (atf_no_error()); } diff --git a/tests/sys/netinet6/ndp.sh b/tests/sys/netinet6/ndp.sh index c8de4083a9ea..526ef27a7fb3 100755 --- a/tests/sys/netinet6/ndp.sh +++ b/tests/sys/netinet6/ndp.sh @@ -365,6 +365,55 @@ ndp_prefix_lifetime_extend_body() { fi } +atf_test_case "ndp_grand_linklayer_event" "cleanup" +ndp_grand_linklayer_event_head() { + atf_set descr 'Test ndp GRAND on link-layer change event' + atf_set require.user root +} + +ndp_grand_linklayer_event_body() { + local epair0 jname address mac + + vnet_init + + jname="v6t-ndp_grand_linklayer_event" + prefix="2001:db8:ffff:1000::" + mac="90:10:00:01:02:03" + + epair0=$(vnet_mkepair) + + vnet_mkjail ${jname}1 ${epair0}a + vnet_mkjail ${jname}2 ${epair0}b + + ndp_if_up ${epair0}a ${jname}1 + ndp_if_up ${epair0}b ${jname}2 + + # with no_dad, grand for new global address will NOT run + atf_check ifconfig -j ${jname}1 ${epair0}a inet6 ${prefix}1 no_dad + atf_check ifconfig -j ${jname}2 ${epair0}b inet6 
${prefix}2 no_dad + atf_check -s exit:1 -e ignore -o ignore \ + jexec ${jname}2 ndp -n ${prefix}1 + + # Create the NCE in jail 2. + atf_check -o ignore jexec ${jname}2 ping -c1 -t1 ${prefix}1 + + # Check if current mac is received + atf_check -s exit:0 -o ignore jexec ${jname}2 ndp -n ${prefix}1 + + # change mac address to trigger grand. + atf_check ifconfig -j ${jname}1 ${epair0}a ether ${mac} + + # link-local is the first address, thus our address should + # wait a second before sending its NA + atf_check -o not-match:"${prefix}1.*${mac}.*" \ + jexec ${jname}2 ndp -n ${prefix}1 + + sleep 1.1 + # Check if mac address automatically updated + atf_check -o match:"${prefix}1.*${mac}.*" \ + jexec ${jname}2 ndp -n ${prefix}1 +} + atf_init_test_cases() { atf_add_test_case "ndp_add_gu_success" @@ -373,4 +422,5 @@ atf_init_test_cases() atf_add_test_case "ndp_prefix_len_mismatch" atf_add_test_case "ndp_prefix_lifetime" atf_add_test_case "ndp_prefix_lifetime_extend" + atf_add_test_case "ndp_grand_linklayer_event" } diff --git a/tests/sys/netinet6/output6.sh b/tests/sys/netinet6/output6.sh index 5811e0e5eacc..bf8903ba3c07 100755 --- a/tests/sys/netinet6/output6.sh +++ b/tests/sys/netinet6/output6.sh @@ -247,7 +247,7 @@ output6_raw_success_cleanup() mpath_check() { if [ `sysctl -iW net.route.multipath | wc -l` != "1" ]; then - atf_skip "This test requires ROUTE_MPATH enabled" + atf_skip "This test requires multipath enabled" fi } diff --git a/tests/sys/netlink/test_snl.c b/tests/sys/netlink/test_snl.c index 3990aa0b075d..c2bbc4aff949 100644 --- a/tests/sys/netlink/test_snl.c +++ b/tests/sys/netlink/test_snl.c @@ -64,11 +64,11 @@ ATF_TC_BODY(snl_parse_errmsg_capped, tc) if (!snl_init(&ss, NETLINK_ROUTE)) atf_tc_fail("snl_init() failed"); - atf_tc_skip("does not work"); - int optval = 1; ATF_CHECK(setsockopt(ss.fd, SOL_NETLINK, NETLINK_CAP_ACK, &optval, sizeof(optval)) == 0); + optval = 0; + ATF_CHECK(setsockopt(ss.fd, SOL_NETLINK, NETLINK_EXT_ACK, &optval, sizeof(optval)) == 
0); snl_init_writer(&ss, &nw); struct nlmsghdr *hdr = snl_create_msg_request(&nw, 255); diff --git a/tests/sys/netpfil/common/utils.subr b/tests/sys/netpfil/common/utils.subr index 8f298960bef5..8885495cec11 100644 --- a/tests/sys/netpfil/common/utils.subr +++ b/tests/sys/netpfil/common/utils.subr @@ -88,7 +88,7 @@ firewall_init() if ! kldstat -q -m ipfw; then atf_skip "This test requires ipfw" elif [ $(sysctl -n net.inet.ip.fw.default_to_accept) -ne 1 ]; then - atf_fail "ipfw tests require net.inet.ip.fw.default_to_accept=1 tunable" + atf_skip "ipfw tests require net.inet.ip.fw.default_to_accept=1 tunable" fi elif [ ${firewall} == "pf" ]; then if [ ! -c /dev/pf ]; then diff --git a/tests/sys/netpfil/pf/nat.sh b/tests/sys/netpfil/pf/nat.sh index 025471f75f97..600688a8dbc9 100644 --- a/tests/sys/netpfil/pf/nat.sh +++ b/tests/sys/netpfil/pf/nat.sh @@ -953,6 +953,50 @@ dummynet_mask_cleanup() pft_cleanup } +atf_test_case "first_match" "cleanup" +first_match_head() +{ + atf_set descr 'Test that NAT rules are first match' + atf_set require.user root +} + +first_match_body() +{ + pft_init + + epair_nat=$(vnet_mkepair) + epair_echo=$(vnet_mkepair) + + vnet_mkjail nat ${epair_nat}b ${epair_echo}a + vnet_mkjail echo ${epair_echo}b + + ifconfig ${epair_nat}a 192.0.2.2/24 up + route add -net 198.51.100.0/24 192.0.2.1 + + jexec nat ifconfig ${epair_nat}b 192.0.2.1/24 up + jexec nat ifconfig ${epair_echo}a 198.51.100.1/24 up + jexec nat sysctl net.inet.ip.forwarding=1 + + jexec echo ifconfig ${epair_echo}b 198.51.100.2/24 up + + # Enable pf! 
+ jexec nat pfctl -e + pft_set_rules nat \ + "table <foo> { 192.0.2.0/24 }" \ + "nat on ${epair_echo}a inet from <foo> to any -> 198.51.100.1" \ + "nat on ${epair_echo}a inet from 192.0.2.0/24 to any -> 198.51.100.3" + + atf_check -s exit:0 -o ignore ping -c 3 198.51.100.2 + atf_check -s exit:0 -e ignore \ + -o match:"all icmp 198.51.100.1:.*(192.0.2.2:.*) -> 198.51.100.2:8.*" \ + jexec nat pfctl -ss +} + +first_match_cleanup() +{ + pft_cleanup +} + atf_init_test_cases() { atf_add_test_case "exhaust" @@ -975,4 +1019,5 @@ atf_init_test_cases() atf_add_test_case "binat_match" atf_add_test_case "empty_pool" atf_add_test_case "dummynet_mask" + atf_add_test_case "first_match" } diff --git a/tests/sys/netpfil/pf/pass_block.sh b/tests/sys/netpfil/pf/pass_block.sh index e955068d014b..f6d973de7cf4 100644 --- a/tests/sys/netpfil/pf/pass_block.sh +++ b/tests/sys/netpfil/pf/pass_block.sh @@ -451,6 +451,43 @@ any_if_cleanup() pft_cleanup } +atf_test_case "addr_range" "cleanup" +addr_range_head() +{ + atf_set descr 'Test rulesets with multiple address ranges' + atf_set require.user root +} + +addr_range_body() +{ + pft_init + + epair=$(vnet_mkepair) + ifconfig ${epair}b 192.0.2.2/24 up + + vnet_mkjail alcatraz ${epair}a + jexec alcatraz ifconfig ${epair}a 192.0.2.1/24 up + + # Sanity check + atf_check -s exit:0 -o ignore ping -c 1 -t 1 192.0.2.1 + + jexec alcatraz pfctl -e + pft_set_rules alcatraz \ + "block" \ + "pass inet from any to 10.100.100.1 - 10.100.100.20" \ + "pass inet from any to 192.0.2.1 - 192.0.2.10" + +jexec alcatraz pfctl -sr -vv + + atf_check -s exit:0 -o ignore ping -c 1 -t 1 192.0.2.1 +jexec alcatraz pfctl -sr -vv +} + +addr_range_cleanup() +{ + pft_cleanup +} + atf_init_test_cases() { atf_add_test_case "enable_disable" @@ -462,4 +499,5 @@ atf_init_test_cases() atf_add_test_case "received_on" atf_add_test_case "optimize_any" atf_add_test_case "any_if" + atf_add_test_case "addr_range" } diff --git a/tools/build/Makefile b/tools/build/Makefile index 
161b8f635c6d..604885dea4c8 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -253,6 +253,9 @@ SYSINCS+= ${SRCTOP}/sys/sys/linker_set.h .if ${.MAKE.OS} == "Darwin" # Standalone implementation of secure_getenv(), not available on MacOS. SRCS+= secure_getenv.c +# macOS currently lacks mempcpy +.PATH: ${LIBC_SRCTOP}/string +SRCS+= mempcpy.c .endif # ${MAKE.OS} == "Darwin" # Provide the same arc4random implementation on Linux/macOS diff --git a/tools/build/cross-build/include/mac/string.h b/tools/build/cross-build/include/mac/string.h index 58464f1f9834..8a36bb1f392d 100644 --- a/tools/build/cross-build/include/mac/string.h +++ b/tools/build/cross-build/include/mac/string.h @@ -37,6 +37,8 @@ #include_next <string.h> +void *(mempcpy)(void * __restrict, const void * __restrict, size_t); + /* * strchrnul is provided by macOS 15.4 and later. However, there is * no good way to detect the current host version at compile time, so diff --git a/tools/test/stress2/misc/all.exclude b/tools/test/stress2/misc/all.exclude index a802f7c21cb1..9ec5bffde0f6 100644 --- a/tools/test/stress2/misc/all.exclude +++ b/tools/test/stress2/misc/all.exclude @@ -22,6 +22,7 @@ gjournal3.sh panic: Bio not on queue 20171225 gjournal4.sh CAM stuck in vmwait 20180517 gnop10.sh Waiting for fix 20230319 gnop13.sh https://people.freebsd.org/~pho/stress/log/log0386.txt 20221113 +gnop3.sh CAM stuck in vmwait 20260219 gnop7.sh Waiting for patch commit 20190820 gnop8.sh Waiting for patch commit 20201214 gnop9.sh Waiting for patch commit 20201214 @@ -29,8 +30,6 @@ graid1_3.sh Hang seen 20250915 graid1_8.sh Known issue 20170909 graid1_9.sh panic: Bad effnlink 20180212 gunion.sh CAM stuk in vmwait 20251226 -ifconfig.sh https://people.freebsd.org/~pho/stress/log/log0626.txt 20251217 -ifconfig2.sh Hang in ifnet_de, vlan_sx and sbwait 20250114 lockf5.sh Spinning threads seen 20160718 maxvnodes2.sh https://people.freebsd.org/~pho/stress/log/log0083.txt 20210329 memguard.sh 
https://people.freebsd.org/~pho/stress/log/log0088.txt 20210402 @@ -71,10 +70,18 @@ syzkaller16.sh zonelimit issue 20210722 syzkaller28.sh panic: About to free ctl:0x... so:0x... and its in 1 20201120 syzkaller55.sh https://people.freebsd.org/~pho/stress/log/log0533.txt 20240702 syzkaller59.sh Page fault 20220625 +syzkaller68.sh Can not unload zfs.ko after this test 20260206 syzkaller80.sh panic 20250711 syzkaller82.sh panic: m_apply, length > size of mbuf chain 20250724 syzkaller85.sh panic: Assertion uio->uio_resid < 0 failed 20250928 -syzkaller89.sh panic: MNT_DEFERRED requires MNT_RECURSE | MNT_FORCE 20241224 +syzkaller90.sh panic: general protection fault 20260318 +syzkaller91.sh Kernel page fault with the following non-sleepable locks held 20260318 +syzkaller92.sh Kernel page fault with the following non-sleepable locks held 20260318 +syzkaller93.sh panic: _free(0): addr 0xfffff802f7e5a7b8 slab 0xfffffffffffffff 20260318 +syzkaller94.sh panic: ata_action: ccb 0xfffff80347e777b8, func_code 0x1 should 20260318 +syzkaller95.sh Kernel page fault with the following non-sleepable locks held 20260318 +syzkaller97.sh panic: cam_periph_ccbwait: proceeding with incomplete ccb 20260318 +syzkaller98.sh panic: dst_m 0xfffffe00130fd920 is not wired 20260318 quota3.sh https://people.freebsd.org/~pho/stress/log/log0604.txt 20250728 quota6.sh https://people.freebsd.org/~pho/stress/log/log0456.txt 20240707 truss3.sh WiP 20200915 diff --git a/tools/test/stress2/misc/syzkaller90.sh b/tools/test/stress2/misc/syzkaller90.sh new file mode 100755 index 000000000000..f7ff78ff5f65 --- /dev/null +++ b/tools/test/stress2/misc/syzkaller90.sh @@ -0,0 +1,228 @@ +#!/bin/sh + +# cpuid = 4; apic id = 04 +# instruction pointer = 0x20:0xffffffff803a1e9c +# stack pointer = 0x28:0xfffffe0202e4c930 +# frame pointer = 0x28:0xfffffe0202e4c970 +# code segment = base 0x0, limit 0xfffff, type 0x1b +# = DPL 0, pres 1, long 1, def32 0, gran 1 +# processor eflags = interrupt enabled, resume, IOPL = 0 +# 
current process = 90315 (repro20) +# rdi: fffff803157b7000 rsi: 0000000000000004 rdx: ffffffff81250a83 +# rcx: 0000000000000010 r8: 000000000000000e r9: 1627af6b9da6f5a7 +# rax: 0000000000000010 rbx: fffff803157b7000 rbp: fffffe0202e4c970 +# r10: fffff803157b70c8 r11: fffff807cf9bfcd0 r12: 0000000000000001 +# r13: fffff803157b7048 r14: fffff800035e0ac0 r15: 6e3642f32a3ae6f2 +# trap number = 9 +# panic: general protection fault +# cpuid = 4 +# time = 1773820163 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe0202e4c6b0 +# vpanic() at vpanic+0x136/frame 0xfffffe0202e4c7e0 +# panic() at panic+0x43/frame 0xfffffe0202e4c840 +# trap_fatal() at trap_fatal+0x68/frame 0xfffffe0202e4c860 +# calltrap() at calltrap+0x8/frame 0xfffffe0202e4c860 +# --- trap 0x9, rip = 0xffffffff803a1e9c, rsp = 0xfffffe0202e4c930, rbp = 0xfffffe0202e4c970 --- +# xpt_action_default() at xpt_action_default+0x80c/frame 0xfffffe0202e4c970 +# cam_periph_runccb() at cam_periph_runccb+0xec/frame 0xfffffe0202e4cac0 +# passsendccb() at passsendccb+0x160/frame 0xfffffe0202e4cb30 +# passdoioctl() at passdoioctl+0x3a1/frame 0xfffffe0202e4cb80 +# passioctl() at passioctl+0x22/frame 0xfffffe0202e4cbc0 +# devfs_ioctl() at devfs_ioctl+0xd1/frame 0xfffffe0202e4cc10 +# VOP_IOCTL_APV() at VOP_IOCTL_APV+0x51/frame 0xfffffe0202e4cc40 +# vn_ioctl() at vn_ioctl+0x160/frame 0xfffffe0202e4ccb0 +# devfs_ioctl_f() at devfs_ioctl_f+0x1e/frame 0xfffffe0202e4ccd0 +# kern_ioctl() at kern_ioctl+0x2a1/frame 0xfffffe0202e4cd40 +# sys_ioctl() at sys_ioctl+0x12f/frame 0xfffffe0202e4ce00 +# amd64_syscall() at amd64_syscall+0x169/frame 0xfffffe0202e4cf30 +# fast_syscall_common() at fast_syscall_common+0xf8/frame 0xfffffe0202e4cf30 +# --- syscall (0, FreeBSD ELF64, syscall), rip = 0x823e9deca, rsp = 0x820edf228, rbp = 0x820edf250 --- +# KDB: enter: panic +# [ thread pid 90315 tid 851795 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 
16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# Bug 293888 - Fatal trap NUM: general protection fault while in kernel mode in cam_periph_runccb + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. ../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// Bug 293888 - Fatal trap NUM: general protection fault while in kernel mode in cam_periph_runccb +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_pass_pass_cdevsw + memcpy((void*)0x200000000000, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000000ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[0] = res; + // ioctl\$CAMIOCOMMAND_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0xc4e01a02 (8 bytes) + // arg: ptr[inout, ccb\$pass_cdevsw] { + // union ccb\$pass_cdevsw { + // nvmeio: 
ccb_nvmeio\$pass_cdevsw { + // ccb_h: ccb_hdr\$pass_cdevsw { + // pinfo: cam_pinfo\$pass_cdevsw { + // priority: int32 = 0x8 (4 bytes) + // generation: int32 = 0x6 (4 bytes) + // index: int32 = 0xb406 (4 bytes) + // } + // pad = 0x0 (4 bytes) + // xpt_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x100000000 (8 bytes) + // priority: int32 = 0x70 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // sim_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x7 (8 bytes) + // priority: int32 = 0x81 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // periph_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x8000000000000000 (8 bytes) + // priority: int32 = 0xffffffc0 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // retry_count: int16 = 0xa5f (2 bytes) + // alloc_flags: int16 = 0xb (2 bytes) + // pad = 0x0 (4 bytes) + // cbfcnp: intptr = 0x3ff (8 bytes) + // func_code: int32 = 0x10 (4 bytes) + // status: int32 = 0x3 (4 bytes) + // path: intptr = 0xe10 (8 bytes) + // path_id: int32 = 0x8 (4 bytes) + // target_id: int32 = 0x7fffffff (4 bytes) + // target_lun: int64 = 0x4 (8 bytes) + // flags: int32 = 0xe (4 bytes) + // xflags: int32 = 0x130d (4 bytes) + // periph_priv: buffer: {5c d8 48 b0 e1 42 d0 a6 b0 73 4f 56 fb 07 + // 08 b5} (length 0x10) sim_priv: buffer: {0f c0 f1 57 fc dc a5 76 + // 71 ad 9f 46 0c eb b2 fc} (length 0x10) qos: buffer: {7a 6f cd f8 + // b3 f0 65 53 2e 65 18 29 70 c1 63 f1} (length 0x10) timeout: + // int32 = 0x8000 (4 bytes) pad = 0x0 (4 bytes) softtimeout: + // timeval { + // sec: intptr = 0x6 (8 bytes) + // usec: intptr = 0x9 (8 bytes) + // } + // } + // payload: buffer: {ec d6 eb 0c 55 29 7e 1e f2 e6 3a 2a f3 42 36 6e + // a7 f5 a6 9d 6b af 27 16 0d 12 f7 c7 a6 d3 dc 8d 89 88 c3 75 c4 2c + // a8 fb 0a 90 70 3d c6 5a 63 b8 ac 32 e2 21 4b 36 13 0e 64 c1 86 b2 + // 38 66 cc bf 6d c9 86 33 8c eb a1 fa b5 dd 55 c8 76 04 6d c2 b8 20 + // 31 11 5f 24 8b f4 d7 00 7c 7a 4f 00 4e fd 2f 0f 57 bc c2 00 22 b1 + // 23 4f 4b 19 c7 9a 47 1e b0 ea 
60 87 f3 88 71 9d d1 e4 dd 15 da bf + // 0d 03 34 d9 32 bf b5 80 9f 72 80 dc 37 b2 0e 79 d3 96 93 12 50 0c + // 77 0b d9 9d 0c 93 0c b2 c8 03 bc 75 14 5a c0 50 dc 3f d3 92 ee 07 + // b5 a9 f2 85 76 a7 36 8d 6f 71 fb 8a cb ee 8c 0c 77 8d 81 b0 02 38 + // 70 4a 3d c9 1a f5 4f 91 e6 a1 14 93 3e be a0 e8 7a 69 33 cc e4 d2 + // 8c 88 af c9 05 d4 74 b0 87 a3 34 3b 0c 9e d4 42 bd 8e 03 24 91 2c + // 94 1f 5b 88 7c 0c b2 07 af 68 43 d0 5b cb f9 b2 64 ce b6 c9} + // (length 0x100) + // } + // } + // } + // ] + *(uint32_t*)0x200000000140 = 8; + *(uint32_t*)0x200000000144 = 6; + *(uint32_t*)0x200000000148 = 0xb406; + *(uint64_t*)0x200000000150 = 0x100000000; + *(uint32_t*)0x200000000158 = 0x70; + *(uint64_t*)0x200000000160 = 7; + *(uint32_t*)0x200000000168 = 0x81; + *(uint64_t*)0x200000000170 = 0x8000000000000000; + *(uint32_t*)0x200000000178 = 0xffffffc0; + *(uint16_t*)0x200000000180 = 0xa5f; + *(uint16_t*)0x200000000182 = 0xb; + *(uint64_t*)0x200000000188 = 0x3ff; + *(uint32_t*)0x200000000190 = 0x10; + *(uint32_t*)0x200000000194 = 3; + *(uint64_t*)0x200000000198 = 0xe10; + *(uint32_t*)0x2000000001a0 = 8; + *(uint32_t*)0x2000000001a4 = 0x7fffffff; + *(uint64_t*)0x2000000001a8 = 4; + *(uint32_t*)0x2000000001b0 = 0xe; + *(uint32_t*)0x2000000001b4 = 0x130d; + memcpy((void*)0x2000000001b8, + "\x5c\xd8\x48\xb0\xe1\x42\xd0\xa6\xb0\x73\x4f\x56\xfb\x07\x08\xb5", + 16); + memcpy((void*)0x2000000001c8, + "\x0f\xc0\xf1\x57\xfc\xdc\xa5\x76\x71\xad\x9f\x46\x0c\xeb\xb2\xfc", + 16); + memcpy((void*)0x2000000001d8, + "\x7a\x6f\xcd\xf8\xb3\xf0\x65\x53\x2e\x65\x18\x29\x70\xc1\x63\xf1", + 16); + *(uint32_t*)0x2000000001e8 = 0x8000; + *(uint64_t*)0x2000000001f0 = 6; + *(uint64_t*)0x2000000001f8 = 9; + memcpy( + (void*)0x200000000200, + "\xec\xd6\xeb\x0c\x55\x29\x7e\x1e\xf2\xe6\x3a\x2a\xf3\x42\x36\x6e\xa7\xf5" + "\xa6\x9d\x6b\xaf\x27\x16\x0d\x12\xf7\xc7\xa6\xd3\xdc\x8d\x89\x88\xc3\x75" + "\xc4\x2c\xa8\xfb\x0a\x90\x70\x3d\xc6\x5a\x63\xb8\xac\x32\xe2\x21\x4b\x36" + 
"\x13\x0e\x64\xc1\x86\xb2\x38\x66\xcc\xbf\x6d\xc9\x86\x33\x8c\xeb\xa1\xfa" + "\xb5\xdd\x55\xc8\x76\x04\x6d\xc2\xb8\x20\x31\x11\x5f\x24\x8b\xf4\xd7\x00" + "\x7c\x7a\x4f\x00\x4e\xfd\x2f\x0f\x57\xbc\xc2\x00\x22\xb1\x23\x4f\x4b\x19" + "\xc7\x9a\x47\x1e\xb0\xea\x60\x87\xf3\x88\x71\x9d\xd1\xe4\xdd\x15\xda\xbf" + "\x0d\x03\x34\xd9\x32\xbf\xb5\x80\x9f\x72\x80\xdc\x37\xb2\x0e\x79\xd3\x96" + "\x93\x12\x50\x0c\x77\x0b\xd9\x9d\x0c\x93\x0c\xb2\xc8\x03\xbc\x75\x14\x5a" + "\xc0\x50\xdc\x3f\xd3\x92\xee\x07\xb5\xa9\xf2\x85\x76\xa7\x36\x8d\x6f\x71" + "\xfb\x8a\xcb\xee\x8c\x0c\x77\x8d\x81\xb0\x02\x38\x70\x4a\x3d\xc9\x1a\xf5" + "\x4f\x91\xe6\xa1\x14\x93\x3e\xbe\xa0\xe8\x7a\x69\x33\xcc\xe4\xd2\x8c\x88" + "\xaf\xc9\x05\xd4\x74\xb0\x87\xa3\x34\x3b\x0c\x9e\xd4\x42\xbd\x8e\x03\x24" + "\x91\x2c\x94\x1f\x5b\x88\x7c\x0c\xb2\x07\xaf\x68\x43\xd0\x5b\xcb\xf9\xb2" + "\x64\xce\xb6\xc9", + 256); + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0xc4e01a02ul, + /*arg=*/0x200000000140ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller91.sh b/tools/test/stress2/misc/syzkaller91.sh new file mode 100755 index 000000000000..7f11fe33a6ca --- /dev/null +++ b/tools/test/stress2/misc/syzkaller91.sh @@ -0,0 +1,217 @@ +#!/bin/sh + +# Kernel page fault with the following non-sleepable locks held: +# exclusive sleep mutex CAM device lock (CAM device lock) r = 0 (0xfffff80006ad2cd0) locked @ cam/scsi/scsi_pass.c:1766 +# stack backtrace: +# #0 0xffffffff80c4787c at witness_debugger+0x6c +# #1 0xffffffff80c49189 at witness_warn+0x4c9 +# #2 0xffffffff81131d8c at trap_pfault+0x8c +# #3 0xffffffff811015a8 at calltrap+0x8 +# #4 0xffffffff8039de7c at cam_periph_runccb+0xec +# #5 0xffffffff803d9160 at passsendccb+0x160 +# #6 0xffffffff803d8821 at passdoioctl+0x3a1 +# #7 0xffffffff803d8102 at passioctl+0x22 +# #8 0xffffffff80a413b1 at devfs_ioctl+0xd1 
+# #9 0xffffffff81204821 at VOP_IOCTL_APV+0x51 +# #10 0xffffffff80cf0890 at vn_ioctl+0x160 +# #11 0xffffffff80a41a7e at devfs_ioctl_f+0x1e +# #12 0xffffffff80c4e3c1 at kern_ioctl+0x2a1 +# #13 0xffffffff80c4e0bf at sys_ioctl+0x12f +# #14 0xffffffff811327d9 at amd64_syscall+0x169 +# #15 0xffffffff81101e9b at fast_syscall_common+0xf8 +# +# +# Fatal trap 12: page fault while in kernel mode +# cpuid = 9; apic id = 09 +# fault virtual address = 0x50 +# fault code = supervisor read data, page not present +# instruction pointer = 0x20:0xffffffff803a1e9c +# stack pointer = 0x28:0xfffffe01001f2930 +# frame pointer = 0x28:0xfffffe01001f2970 +# code segment = base 0x0, limit 0xfffff, type 0x1b +# = DPL 0, pres 1, long 1, def32 0, gran 1 +# processor eflags = interrupt enabled, resume, IOPL = 0 +# current process = 3759 (syzkaller91) +# rdi: fffff80006ac0800 rsi: 0000000000000004 rdx: ffffffff81250a83 +# rcx: 0000000000000010 r8: 0000000000000008 r9: 0000000000000000 +# rax: 0000000000000010 rbx: fffff80006ac0800 rbp: fffffe01001f2970 +# r10: fffff80006ac08c8 r11: 0000000000000001 r12: 0000000000000001 +# r13: fffff80006ac0848 r14: fffff80006b9d2c0 r15: 0000000000000000 +# trap number = 12 +# panic: page fault +# cpuid = 9 +# time = 1773832077 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe01001f2660 +# vpanic() at vpanic+0x136/frame 0xfffffe01001f2790 +# panic() at panic+0x43/frame 0xfffffe01001f27f0 +# trap_pfault() at trap_pfault+0x422/frame 0xfffffe01001f2860 +# calltrap() at calltrap+0x8/frame 0xfffffe01001f2860 +# --- trap 0xc, rip = 0xffffffff803a1e9c, rsp = 0xfffffe01001f2930, rbp = 0xfffffe01001f2970 --- +# xpt_action_default() at xpt_action_default+0x80c/frame 0xfffffe01001f2970 +# cam_periph_runccb() at cam_periph_runccb+0xec/frame 0xfffffe01001f2ac0 +# passsendccb() at passsendccb+0x160/frame 0xfffffe01001f2b30 +# passdoioctl() at passdoioctl+0x3a1/frame 0xfffffe01001f2b80 +# passioctl() at passioctl+0x22/frame 
0xfffffe01001f2bc0 +# devfs_ioctl() at devfs_ioctl+0xd1/frame 0xfffffe01001f2c10 +# VOP_IOCTL_APV() at VOP_IOCTL_APV+0x51/frame 0xfffffe01001f2c40 +# vn_ioctl() at vn_ioctl+0x160/frame 0xfffffe01001f2cb0 +# devfs_ioctl_f() at devfs_ioctl_f+0x1e/frame 0xfffffe01001f2cd0 +# kern_ioctl() at kern_ioctl+0x2a1/frame 0xfffffe01001f2d40 +# sys_ioctl() at sys_ioctl+0x12f/frame 0xfffffe01001f2e00 +# amd64_syscall() at amd64_syscall+0x169/frame 0xfffffe01001f2f30 +# fast_syscall_common() at fast_syscall_common+0xf8/frame 0xfffffe01001f2f30 +# --- syscall (0, FreeBSD ELF64, syscall), rip = 0x823e6feca, rsp = 0x820c6d558, rbp = 0x820c6d580 --- +# KDB: enter: panic +# [ thread pid 3759 tid 100348 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# [Bug 293890] Fatal trap NUM: page fault while in kernel mode in cam_periph_runccb + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. 
../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_pass_pass_cdevsw + memcpy((void*)0x200000000100, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000100ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[0] = res; + // ioctl\$CAMIOCOMMAND_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0xc4e01a02 (8 bytes) + // arg: ptr[inout, ccb\$pass_cdevsw] { + // union ccb\$pass_cdevsw { + // ccb_h: ccb_hdr\$pass_cdevsw { + // pinfo: cam_pinfo\$pass_cdevsw { + // priority: int32 = 0x5 (4 bytes) + // generation: int32 = 0x2 (4 bytes) + // index: int32 = 0x3 (4 bytes) + // } + // pad = 0x0 (4 bytes) + // xpt_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0xb (8 bytes) + // priority: int32 = 0x6 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // sim_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x8 (8 bytes) + // priority: int32 = 0x6 (4 bytes) + // pad = 
0x0 (4 bytes) + // } + // periph_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0xfe (8 bytes) + // priority: int32 = 0x6 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // retry_count: int16 = 0x3 (2 bytes) + // alloc_flags: int16 = 0x5 (2 bytes) + // pad = 0x0 (4 bytes) + // cbfcnp: intptr = 0xbfc (8 bytes) + // func_code: int32 = 0x10 (4 bytes) + // status: int32 = 0x4 (4 bytes) + // path: intptr = 0x5 (8 bytes) + // path_id: int32 = 0x0 (4 bytes) + // target_id: int32 = 0x2 (4 bytes) + // target_lun: int64 = 0x7e2 (8 bytes) + // flags: int32 = 0x8 (4 bytes) + // xflags: int32 = 0x3 (4 bytes) + // periph_priv: buffer: {bc 09 6b 26 d7 02 3b 02 06 84 bf 81 a9 85 11 + // 50} (length 0x10) sim_priv: buffer: {a5 da 75 ef af 1d 7f d5 40 94 + // 02 67 14 f6 36 17} (length 0x10) qos: buffer: {74 70 33 74 c5 58 + // 85 93 b4 d5 75 39 9f 79 94 a4} (length 0x10) timeout: int32 = 0x2 + // (4 bytes) pad = 0x0 (4 bytes) softtimeout: timeval { + // sec: intptr = 0x6e (8 bytes) + // usec: intptr = 0x400 (8 bytes) + // } + // } + // } + // } + // ] + *(uint32_t*)0x200000000240 = 5; + *(uint32_t*)0x200000000244 = 2; + *(uint32_t*)0x200000000248 = 3; + *(uint64_t*)0x200000000250 = 0xb; + *(uint32_t*)0x200000000258 = 6; + *(uint64_t*)0x200000000260 = 8; + *(uint32_t*)0x200000000268 = 6; + *(uint64_t*)0x200000000270 = 0xfe; + *(uint32_t*)0x200000000278 = 6; + *(uint16_t*)0x200000000280 = 3; + *(uint16_t*)0x200000000282 = 5; + *(uint64_t*)0x200000000288 = 0xbfc; + *(uint32_t*)0x200000000290 = 0x10; + *(uint32_t*)0x200000000294 = 4; + *(uint64_t*)0x200000000298 = 5; + *(uint32_t*)0x2000000002a0 = 0; + *(uint32_t*)0x2000000002a4 = 2; + *(uint64_t*)0x2000000002a8 = 0x7e2; + *(uint32_t*)0x2000000002b0 = 8; + *(uint32_t*)0x2000000002b4 = 3; + memcpy((void*)0x2000000002b8, + "\xbc\x09\x6b\x26\xd7\x02\x3b\x02\x06\x84\xbf\x81\xa9\x85\x11\x50", + 16); + memcpy((void*)0x2000000002c8, + "\xa5\xda\x75\xef\xaf\x1d\x7f\xd5\x40\x94\x02\x67\x14\xf6\x36\x17", + 16); + 
memcpy((void*)0x2000000002d8, + "\x74\x70\x33\x74\xc5\x58\x85\x93\xb4\xd5\x75\x39\x9f\x79\x94\xa4", + 16); + *(uint32_t*)0x2000000002e8 = 2; + *(uint64_t*)0x2000000002f0 = 0x6e; + *(uint64_t*)0x2000000002f8 = 0x400; + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0xc4e01a02ul, + /*arg=*/0x200000000240ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller92.sh b/tools/test/stress2/misc/syzkaller92.sh new file mode 100755 index 000000000000..428fdaa8815d --- /dev/null +++ b/tools/test/stress2/misc/syzkaller92.sh @@ -0,0 +1,265 @@ +#!/bin/sh + +# Kernel page fault with the following non-sleepable locks held: +# exclusive sleep mutex CAM device lock (CAM device lock) r = 0 (0xfffff8000365ecd0) locked @ cam/scsi/scsi_pass.c:1973 +# stack backtrace: +# #0 0xffffffff80c4787c at witness_debugger+0x6c +# #1 0xffffffff80c49189 at witness_warn+0x4c9 +# #2 0xffffffff81131d8c at trap_pfault+0x8c +# #3 0xffffffff811015a8 at calltrap+0x8 +# #4 0xffffffff803d8e3e at passdoioctl+0x9be +# #5 0xffffffff803d8102 at passioctl+0x22 +# #6 0xffffffff80a413b1 at devfs_ioctl+0xd1 +# #7 0xffffffff81204821 at VOP_IOCTL_APV+0x51 +# #8 0xffffffff80cf0890 at vn_ioctl+0x160 +# #9 0xffffffff80a41a7e at devfs_ioctl_f+0x1e +# #10 0xffffffff80c4e3c1 at kern_ioctl+0x2a1 +# #11 0xffffffff80c4e0bf at sys_ioctl+0x12f +# #12 0xffffffff811327d9 at amd64_syscall+0x169 +# #13 0xffffffff81101e9b at fast_syscall_common+0xf8 +# +# +# Fatal trap 12: page fault while in kernel mode +# cpuid = 11; apic id = 0b +# fault virtual address = 0x50 +# fault code = supervisor read data, page not present +# instruction pointer = 0x20:0xffffffff803a1e9c +# stack pointer = 0x28:0xfffffe01000d5af0 +# frame pointer = 0x28:0xfffffe01000d5b30 +# code segment = base 0x0, limit 0xfffff, type 0x1b +# = DPL 0, pres 1, long 1, def32 0, gran 1 +# processor eflags = 
interrupt enabled, resume, IOPL = 0 +# current process = 4511 (syzkaller92) +# rdi: fffff8016ace27b8 rsi: fffff8016ace2f60 rdx: 0000000000000010 +# rcx: 0000000000000010 r8: fffff8000602ad80 r9: ffffffff8226dee8 +# rax: 0000000000000010 rbx: fffff8016ace27b8 rbp: fffffe01000d5b30 +# r10: fffff8016ace27b8 r11: fffff80066e42cd0 r12: fffff8016ace27b8 +# r13: 0000000000000016 r14: fffff80003676200 r15: 0000000000000000 +# trap number = 12 +# panic: page fault +# cpuid = 11 +# time = 1773833440 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe01000d5820 +# vpanic() at vpanic+0x136/frame 0xfffffe01000d5950 +# panic() at panic+0x43/frame 0xfffffe01000d59b0 +# trap_pfault() at trap_pfault+0x422/frame 0xfffffe01000d5a20 +# calltrap() at calltrap+0x8/frame 0xfffffe01000d5a20 +# --- trap 0xc, rip = 0xffffffff803a1e9c, rsp = 0xfffffe01000d5af0, rbp = 0xfffffe01000d5b30 --- +# xpt_action_default() at xpt_action_default+0x80c/frame 0xfffffe01000d5b30 +# passdoioctl() at passdoioctl+0x9be/frame 0xfffffe01000d5b80 +# passioctl() at passioctl+0x22/frame 0xfffffe01000d5bc0 +# devfs_ioctl() at devfs_ioctl+0xd1/frame 0xfffffe01000d5c10 +# VOP_IOCTL_APV() at VOP_IOCTL_APV+0x51/frame 0xfffffe01000d5c40 +# vn_ioctl() at vn_ioctl+0x160/frame 0xfffffe01000d5cb0 +# devfs_ioctl_f() at devfs_ioctl_f+0x1e/frame 0xfffffe01000d5cd0 +# kern_ioctl() at kern_ioctl+0x2a1/frame 0xfffffe01000d5d40 +# sys_ioctl() at sys_ioctl+0x12f/frame 0xfffffe01000d5e00 +# amd64_syscall() at amd64_syscall+0x169/frame 0xfffffe01000d5f30 +# fast_syscall_common() at fast_syscall_common+0xf8/frame 0xfffffe01000d5f30 +# --- syscall (0, FreeBSD ELF64, syscall), rip = 0x824057eca, rsp = 0x820f14468, rbp = 0x820f14490 --- +# KDB: enter: panic +# [ thread pid 4511 tid 100357 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# 
pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> reset + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# [Bug 293892] Fatal trap NUM: page fault while in kernel mode in passsendccb + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. ../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +#ifndef SYS_aio_readv +#define SYS_aio_readv 579 +#endif + +uint64_t r[2] = {0xffffffffffffffff, 0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // rfork arguments: [ + // flags: rfork_flags = 0x14014 (8 bytes) + // ] + syscall(SYS_rfork, /*flags=RFLINUXTHPN|RFSIGSHARE|RFFDG|RFPROC*/ 0x14014ul); + // freebsd11_fhstatfs arguments: [ + // fhp: nil + // buf: nil + // ] + syscall(SYS_freebsd11_fhstatfs, /*fhp=*/0ul, /*buf=*/0ul); + // socket\$inet_tcp arguments: [ + // domain: const = 0x2 (8 bytes) + // type: const = 0x1 (8 bytes) + // proto: const = 0x0 (1 bytes) + // ] + // returns sock_tcp + syscall(SYS_socket, /*domain=*/2ul, /*type=*/1ul, /*proto=*/0); + // openat\$bpf arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 62 70 66 00} (length 0x9) + // } + // flags: open_flags = 0x8408 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_bpf + memcpy((void*)0x200000000980, "/dev/bpf\000", 
9); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000980ul, + /*flags=O_TRUNC|O_NOCTTY|O_APPEND*/ 0x8408, /*mode=*/0); + if (res != -1) + r[0] = res; + // aio_readv arguments: [ + // iocb: ptr[in, aiocb] { + // aiocb { + // aio_fildes: fd (resource) + // pad = 0x0 (4 bytes) + // aio_offset: int64 = 0x81 (8 bytes) + // aio_buf: ptr[in, buffer] { + // buffer: {fa} (length 0x1) + // } + // aio_nbytes: len = 0x1 (8 bytes) + // spare: array[int32] { + // int32 = 0xffff (4 bytes) + // int32 = 0x7 (4 bytes) + // } + // spare2: intptr = 0x1 (8 bytes) + // aio_lio_opcode: lio_opcodes = 0x18 (4 bytes) + // aio_reqprio: int32 = 0x1ff (4 bytes) + // aiocb_private: aiocb_private { + // status: intptr = 0x37 (8 bytes) + // error: intptr = 0x24 (8 bytes) + // kernelinfo: nil + // } + // aio_sigevent: sigevent { + // notify: sigev_notify = 0x0 (4 bytes) + // signo: int32 = 0x13 (4 bytes) + // val: union sigval { + // sigval_int: int32 = 0x6 (4 bytes) + // } + // u: union sigevent_u { + // ke_flags: evflags = 0x8000 (2 bytes) + // } + // } + // } + // } + // ] + *(uint32_t*)0x200000000040 = r[0]; + *(uint64_t*)0x200000000048 = 0x81; + *(uint64_t*)0x200000000050 = 0x200000000000; + memset((void*)0x200000000000, 250, 1); + *(uint64_t*)0x200000000058 = 1; + *(uint32_t*)0x200000000060 = 0xffff; + *(uint32_t*)0x200000000064 = 7; + *(uint64_t*)0x200000000068 = 1; + *(uint32_t*)0x200000000070 = 0x18; + *(uint32_t*)0x200000000074 = 0x1ff; + *(uint64_t*)0x200000000078 = 0x37; + *(uint64_t*)0x200000000080 = 0x24; + *(uint64_t*)0x200000000088 = 0; + *(uint32_t*)0x200000000090 = 0; + *(uint32_t*)0x200000000094 = 0x13; + *(uint32_t*)0x200000000098 = 6; + *(uint16_t*)0x2000000000a0 = 0x8000; + syscall(SYS_aio_readv, /*iocb=*/0x200000000040ul); + // openat\$bpf arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 62 70 66 00} (length 0x9) + // } + // flags: open_flags = 0x800 (4 bytes) + // mode: const = 
0x0 (4 bytes) + // ] + // returns fd_bpf + memcpy((void*)0x200000000040, "/dev/bpf\000", 9); + syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, /*file=*/0x200000000040ul, + /*flags=O_EXCL*/ 0x800, /*mode=*/0); + // sigaction arguments: [ + // signo: int32 = 0x6b (4 bytes) + // act: ptr[in, sigaction] { + // sigaction { + // sigaction_u: nil + // sa_flags: sigaction_flags = 0x0 (4 bytes) + // sa_mask: sigset { + // mask: array[int32] { + // int32 = 0x4 (4 bytes) + // int32 = 0x10 (4 bytes) + // int32 = 0x492d (4 bytes) + // int32 = 0x3 (4 bytes) + // } + // } + // pad = 0x0 (4 bytes) + // } + // } + // oact: nil + // ] + *(uint64_t*)0x200000000040 = 0; + *(uint32_t*)0x200000000048 = 0; + *(uint32_t*)0x20000000004c = 4; + *(uint32_t*)0x200000000050 = 0x10; + *(uint32_t*)0x200000000054 = 0x492d; + *(uint32_t*)0x200000000058 = 3; + syscall(SYS_sigaction, /*signo=*/0x6b, /*act=*/0x200000000040ul, + /*oact=*/0ul); + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_pass_pass_cdevsw + memcpy((void*)0x200000000100, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000100ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[1] = res; + // ioctl\$CAMIOQUEUE_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0x20001a04 (8 bytes) + // arg: ptr[in, ptr[in, ccb\$pass_cdevsw]] { + // nil + // } + // ] + *(uint64_t*)0x200000000000 = 0; + syscall(SYS_ioctl, /*fd=*/r[1], /*cmd=*/0x20001a04ul, + /*arg=*/0x200000000000ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller93.sh 
b/tools/test/stress2/misc/syzkaller93.sh new file mode 100755 index 000000000000..208b90d78516 --- /dev/null +++ b/tools/test/stress2/misc/syzkaller93.sh @@ -0,0 +1,137 @@ +#!/bin/sh + +# (pass0:ahcich1:0:0:0): xpt_action_default: CCB type 0x380 0x380 not supported +# panic: _free(0): addr 0xfffff802f7e5a7b8 slab 0xffffffffffffffff with unknown cookie 3 +# cpuid = 8 +# time = 1773835096 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe00ffe5fc60 +# vpanic() at vpanic+0x136/frame 0xfffffe00ffe5fd90 +# panic() at panic+0x43/frame 0xfffffe00ffe5fdf0 +# free() at free+0x213/frame 0xfffffe00ffe5fe30 +# xpt_release_ccb() at xpt_release_ccb+0x50/frame 0xfffffe00ffe5fe60 +# xpt_done_process() at xpt_done_process+0x3e0/frame 0xfffffe00ffe5fea0 +# xpt_done_td() at xpt_done_td+0x145/frame 0xfffffe00ffe5fef0 +# fork_exit() at fork_exit+0x82/frame 0xfffffe00ffe5ff30 +# fork_trampoline() at fork_trampoline+0xe/frame 0xfffffe00ffe5ff30 +# --- trap 0, rip = 0, rsp = 0, rbp = 0 --- +# KDB: enter: panic +# [ thread pid 4 tid 100122 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# [Bug 293893] panic: _free(NUM): address ADDR(ADDR) has not been allocated + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. 
../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_pass_pass_cdevsw + memcpy((void*)0x200000000100, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000100ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[0] = res; + // sendfile arguments: [ + // fd: fd (resource) + // s: sock_in (resource) + // offset: intptr = 0x4 (8 bytes) + // nbytes: int64 = 0x4 (8 bytes) + // hdtr: ptr[in, sf_hdtr] { + // sf_hdtr { + // headers: ptr[in, array[iovec_in]] { + // array[iovec_in] { + // iovec_in { + // addr: nil + // len: len = 0x0 (8 bytes) + // } + // iovec_in { + // addr: ptr[in, buffer] { + // buffer: {} (length 0x0) + // } + // len: len = 0x0 (8 bytes) + // } + // } + // } + // hdr_cnt: len = 0x2 (4 bytes) + // pad = 0x0 (4 bytes) + // trailers: nil + // trl_cnt: len = 0x0 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // } + // sbytes: nil + // flags: sf_flags = 0x1 (8 bytes) + // ] + *(uint64_t*)0x200000001ac0 
= 0x200000000280; + *(uint64_t*)0x200000000280 = 0; + *(uint64_t*)0x200000000288 = 0; + *(uint64_t*)0x200000000290 = 0x200000000380; + *(uint64_t*)0x200000000298 = 0; + *(uint32_t*)0x200000001ac8 = 2; + *(uint64_t*)0x200000001ad0 = 0; + *(uint32_t*)0x200000001ad8 = 0; + syscall(SYS_sendfile, /*fd=*/(intptr_t)-1, /*s=*/(intptr_t)-1, /*offset=*/4ul, + /*nbytes=*/4ul, /*hdtr=*/0x200000001ac0ul, /*sbytes=*/0ul, + /*flags=SF_NODISKIO*/ 1ul); + // ioctl\$CAMIOQUEUE_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0x20001a04 (8 bytes) + // arg: ptr[in, ptr[in, ccb\$pass_cdevsw]] { + // nil + // } + // ] + *(uint64_t*)0x200000000240 = 0; + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0x20001a04ul, + /*arg=*/0x200000000240ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller94.sh b/tools/test/stress2/misc/syzkaller94.sh new file mode 100755 index 000000000000..ae37ad964964 --- /dev/null +++ b/tools/test/stress2/misc/syzkaller94.sh @@ -0,0 +1,185 @@ +#!/bin/sh + +# panic: ata_action: ccb 0xfffff80347e777b8, func_code 0x1 should not be allocated from UMA zone +# cpuid = 1 +# time = 1773837671 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe0100044980 +# vpanic() at vpanic+0x136/frame 0xfffffe0100044ab0 +# panic() at panic+0x43/frame 0xfffffe0100044b10 +# ata_action() at ata_action+0x3bd/frame 0xfffffe0100044b30 +# passdoioctl() at passdoioctl+0x9be/frame 0xfffffe0100044b80 +# passioctl() at passioctl+0x22/frame 0xfffffe0100044bc0 +# devfs_ioctl() at devfs_ioctl+0xd1/frame 0xfffffe0100044c10 +# VOP_IOCTL_APV() at VOP_IOCTL_APV+0x51/frame 0xfffffe0100044c40 +# vn_ioctl() at vn_ioctl+0x160/frame 0xfffffe0100044cb0 +# devfs_ioctl_f() at devfs_ioctl_f+0x1e/frame 0xfffffe0100044cd0 +# kern_ioctl() at kern_ioctl+0x2a1/frame 
0xfffffe0100044d40 +# sys_ioctl() at sys_ioctl+0x12f/frame 0xfffffe0100044e00 +# amd64_syscall() at amd64_syscall+0x169/frame 0xfffffe0100044f30 +# fast_syscall_common() at fast_syscall_common+0xf8/frame 0xfffffe0100044f30 +# --- syscall (0, FreeBSD ELF64, syscall), rip = 0x823bc5eca, rsp = 0x820d83df8, rbp = 0x820d83e20 --- +# KDB: enter: panic +# [ thread pid 4628 tid 100215 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# Bug 293895 - panic: ata_action: ccb ADDR, func_code XXX should not be allocated from UMA zone + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. ../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // sigaction arguments: [ + // signo: int32 = 0x68 (4 bytes) + // act: ptr[in, sigaction] { + // sigaction { + // sigaction_u: nil + // sa_flags: sigaction_flags = 0x2 (4 bytes) + // sa_mask: sigset { + // mask: array[int32] { + // int32 = 0xe4 (4 bytes) + // int32 = 0x1 (4 bytes) + // int32 = 0x4000a (4 bytes) + // int32 = 0xe (4 bytes) + // } 
+ // } + // pad = 0x0 (4 bytes) + // } + // } + // oact: nil + // ] + *(uint64_t*)0x200000000040 = 0; + *(uint32_t*)0x200000000048 = 2; + *(uint32_t*)0x20000000004c = 0xe4; + *(uint32_t*)0x200000000050 = 1; + *(uint32_t*)0x200000000054 = 0x4000a; + *(uint32_t*)0x200000000058 = 0xe; + syscall(SYS_sigaction, /*signo=*/0x68, /*act=*/0x200000000040ul, + /*oact=*/0ul); + // mount\$nfs_newnfs_vnodeops_nosig arguments: [ + // fstype: ptr[in, buffer] { + // buffer: {6e 66 73 00} (length 0x4) + // } + // dir: ptr[in, buffer] { + // buffer: {2e 2f 66 69 6c 65 30 00} (length 0x8) + // } + // mnt_flags: mount_flags = 0x0 (4 bytes) + // data: ptr[in, nfs_args\$newnfs_vnodeops_nosig] { + // nfs_args\$newnfs_vnodeops_nosig { + // version: const = 0x3 (4 bytes) + // pad = 0x0 (4 bytes) + // addr: nil + // addrlen: len = 0x0 (4 bytes) + // sotype: sock_type_newnfs_vnodeops_nosig = 0x2 (4 bytes) + // proto: int32 = 0x4010003 (4 bytes) + // pad = 0x0 (4 bytes) + // fh: nil + // fhsize: len = 0x0 (4 bytes) + // nfs_flags: nfs_mount_flags_newnfs_vnodeops_nosig = 0x8cc006 (4 + // bytes) wsize: int32 = 0x7fff (4 bytes) rsize: int32 = 0xaf8 (4 + // bytes) readdirsize: int32 = 0x9 (4 bytes) timeo: int32 = 0x3 (4 + // bytes) retrans: int32 = 0x800 (4 bytes) maxgrouplist: int32 = 0x9 (4 + // bytes) readahead: int32 = 0x1 (4 bytes) wcommitsize: int32 = 0x7 (4 + // bytes) deadthresh: int32 = 0x1 (4 bytes) pad = 0x0 (4 bytes) + // hostname: nil + // acregmin: int32 = 0x204 (4 bytes) + // acregmax: int32 = 0x0 (4 bytes) + // acdirmin: int32 = 0xfffffff6 (4 bytes) + // acdirmax: int32 = 0x3 (4 bytes) + // } + // } + // ] + memcpy((void*)0x200000000040, "nfs\000", 4); + memcpy((void*)0x200000000080, "./file0\000", 8); + *(uint32_t*)0x200000000200 = 3; + *(uint64_t*)0x200000000208 = 0; + *(uint32_t*)0x200000000210 = 0; + *(uint32_t*)0x200000000214 = 2; + *(uint32_t*)0x200000000218 = 0x4010003; + *(uint64_t*)0x200000000220 = 0; + *(uint32_t*)0x200000000228 = 0; + *(uint32_t*)0x20000000022c = 
0x8cc006; + *(uint32_t*)0x200000000230 = 0x7fff; + *(uint32_t*)0x200000000234 = 0xaf8; + *(uint32_t*)0x200000000238 = 9; + *(uint32_t*)0x20000000023c = 3; + *(uint32_t*)0x200000000240 = 0x800; + *(uint32_t*)0x200000000244 = 9; + *(uint32_t*)0x200000000248 = 1; + *(uint32_t*)0x20000000024c = 7; + *(uint32_t*)0x200000000250 = 1; + *(uint64_t*)0x200000000258 = 0; + *(uint32_t*)0x200000000260 = 0x204; + *(uint32_t*)0x200000000264 = 0; + *(uint32_t*)0x200000000268 = 0xfffffff6; + *(uint32_t*)0x20000000026c = 3; + syscall(SYS_mount, /*fstype=*/0x200000000040ul, /*dir=*/0x200000000080ul, + /*mnt_flags=*/0, /*data=*/0x200000000200ul); + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_pass_pass_cdevsw + memcpy((void*)0x200000000100, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000100ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[0] = res; + // ioctl\$CAMIOQUEUE_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0x20001a04 (8 bytes) + // arg: ptr[in, ptr[in, ccb\$pass_cdevsw]] { + // nil + // } + // ] + *(uint64_t*)0x200000000000 = 0; + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0x20001a04ul, + /*arg=*/0x200000000000ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller95.sh b/tools/test/stress2/misc/syzkaller95.sh new file mode 100755 index 000000000000..be5549788b92 --- /dev/null +++ b/tools/test/stress2/misc/syzkaller95.sh @@ -0,0 +1,453 @@ +#!/bin/sh + +# Kernel page fault with the following non-sleepable locks held: +# exclusive sleep mutex CAM device lock (CAM device lock) r 
= 0 (0xfffff80006bd2cd0) locked @ cam/scsi/scsi_pass.c:1766 +# stack backtrace: +# #0 0xffffffff80c4787c at witness_debugger+0x6c +# #1 0xffffffff80c49189 at witness_warn+0x4c9 +# #2 0xffffffff81131d8c at trap_pfault+0x8c +# #3 0xffffffff811015a8 at calltrap+0x8 +# #4 0xffffffff803d9061 at passsendccb+0x61 +# #5 0xffffffff803d8821 at passdoioctl+0x3a1 +# #6 0xffffffff803d8102 at passioctl+0x22 +# #7 0xffffffff80a413b1 at devfs_ioctl+0xd1 +# #8 0xffffffff81204821 at VOP_IOCTL_APV+0x51 +# #9 0xffffffff80cf0890 at vn_ioctl+0x160 +# #10 0xffffffff80a41a7e at devfs_ioctl_f+0x1e +# #11 0xffffffff80c4e3c1 at kern_ioctl+0x2a1 +# #12 0xffffffff80c4e0bf at sys_ioctl+0x12f +# #13 0xffffffff811327d9 at amd64_syscall+0x169 +# #14 0xffffffff81101e9b at fast_syscall_common+0xf8 +# +# +# Fatal trap 12: page fault while in kernel mode +# cpuid = 4; apic id = 04 +# fault virtual address = 0x800000006 +# fault code = supervisor read data, page not present +# instruction pointer = 0x20:0xffffffff8112edf5 +# frame pointer = 0x28:0xfffffe010003fab0 +# code segment = base 0x0, limit 0xfffff, type 0x1b +# = DPL 0, pres 1, long 1, def32 0, gran 1 +# processor eflags = interrupt enabled, resume, IOPL = 0 +# current process = 5440 (syzkaller95) +# rdi: fffffe010003fac0 rsi: 0000000800000006 rdx: 0000000000000002 +# rcx: 0000000000000002 r8: 0000000800000006 r9: 06eb28196e3b02c0 +# rax: 0000000000000000 rbx: fffff80003e97800 rbp: fffffe010003fab0 +# r10: fffff80003e978c8 r11: fffff800048e5550 r12: fffffe010003fac0 +# r13: fffff80006350d80 r14: fffff80306280800 r15: fffff80006bd6100 +# trap number = 12 +# panic: page fault +# cpuid = 4 +# time = 1773848380 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe010003f7e0 +# vpanic() at vpanic+0x136/frame 0xfffffe010003f910 +# panic() at panic+0x43/frame 0xfffffe010003f970 +# trap_pfault() at trap_pfault+0x422/frame 0xfffffe010003f9e0 +# calltrap() at calltrap+0x8/frame 0xfffffe010003f9e0 +# --- trap 
0xc, rip = 0xffffffff8112edf5, rsp = 0xfffffe010003fab0, rbp = 0xfffffe010003fab0 --- +# copyin_nosmap_erms() at copyin_nosmap_erms+0x115/frame 0xfffffe010003fab0 +# passsendccb() at passsendccb+0x61/frame 0xfffffe010003fb30 +# passdoioctl() at passdoioctl+0x3a1/frame 0xfffffe010003fb80 +# passioctl() at passioctl+0x22/frame 0xfffffe010003fbc0 +# devfs_ioctl() at devfs_ioctl+0xd1/frame 0xfffffe010003fc10 +# VOP_IOCTL_APV() at VOP_IOCTL_APV+0x51/frame 0xfffffe010003fc40 +# vn_ioctl() at vn_ioctl+0x160/frame 0xfffffe010003fcb0 +# devfs_ioctl_f() at devfs_ioctl_f+0x1e/frame 0xfffffe010003fcd0 +# kern_ioctl() at kern_ioctl+0x2a1/frame 0xfffffe010003fd40 +# sys_ioctl() at sys_ioctl+0x12f/frame 0xfffffe010003fe00 +# amd64_syscall() at amd64_syscall+0x169/frame 0xfffffe010003ff30 +# fast_syscall_common() at fast_syscall_common+0xf8/frame 0xfffffe010003ff30 +# --- syscall (0, FreeBSD ELF64, syscall), rip = 0x823c07eca, rsp = 0x8209c6ce8, rbp = 0x8209c6d10 --- +# KDB: enter: panic +# [ thread pid 5440 tid 100235 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# Bug 293892 - Fatal trap NUM: page fault while in kernel mode in passsendccb + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. 
../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // ioctl\$MDIOCDETACH arguments: [ + // fd: fd_md (resource) + // cmd: const = 0xc1c06d01 (8 bytes) + // arg: ptr[inout, md_ioctl] { + // md_ioctl { + // md_version: int32 = 0xe (4 bytes) + // md_unit: int32 = 0x3 (4 bytes) + // md_type: md_types_flags = 0x0 (4 bytes) + // pad = 0x0 (4 bytes) + // md_file: nil + // md_mediasize: int64 = 0x81 (8 bytes) + // md_sectorsize: int32 = 0x4 (4 bytes) + // md_options: int32 = 0x5 (4 bytes) + // md_base: int64 = 0x6 (8 bytes) + // md_fwheads: int32 = 0x4 (4 bytes) + // md_fwsectors: int32 = 0x1 (4 bytes) + // md_label: nil + // md_pad: array[int32] { + // int32 = 0x8 (4 bytes) + // int32 = 0x5 (4 bytes) + // int32 = 0x6 (4 bytes) + // int32 = 0xc3b (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0xa (4 bytes) + // int32 = 0xfffffffe (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0x80 (4 bytes) + // int32 = 0xd22 (4 bytes) + // int32 = 0xa1a5 (4 bytes) + // int32 = 0x0 (4 bytes) + // int32 = 0xfffffff8 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0xffffffff (4 bytes) + // int32 = 0x100 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0x8 (4 bytes) + // int32 = 0x5b8f6f5f (4 bytes) + // int32 = 0x9 (4 
bytes) + // int32 = 0xfffffffb (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0x3 (4 bytes) + // int32 = 0x6 (4 bytes) + // int32 = 0x1 (4 bytes) + // int32 = 0x800 (4 bytes) + // int32 = 0x6b0000 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0x7ff (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0x7 (4 bytes) + // int32 = 0x9 (4 bytes) + // int32 = 0x9 (4 bytes) + // int32 = 0x8000 (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0x5be (4 bytes) + // int32 = 0xf0000000 (4 bytes) + // int32 = 0x1db (4 bytes) + // int32 = 0x3 (4 bytes) + // int32 = 0x0 (4 bytes) + // int32 = 0x8 (4 bytes) + // int32 = 0x18000000 (4 bytes) + // int32 = 0xfd6 (4 bytes) + // int32 = 0x1 (4 bytes) + // int32 = 0x8 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0x0 (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0xe2 (4 bytes) + // int32 = 0x0 (4 bytes) + // int32 = 0x5 (4 bytes) + // int32 = 0x1cd (4 bytes) + // int32 = 0xcf58 (4 bytes) + // int32 = 0x6 (4 bytes) + // int32 = 0x2e7 (4 bytes) + // int32 = 0x64d (4 bytes) + // int32 = 0x2a4 (4 bytes) + // int32 = 0x7 (4 bytes) + // int32 = 0x6 (4 bytes) + // int32 = 0x8 (4 bytes) + // int32 = 0x9 (4 bytes) + // int32 = 0x7 (4 bytes) + // int32 = 0x6 (4 bytes) + // int32 = 0x9 (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0xfffffff8 (4 bytes) + // int32 = 0x5 (4 bytes) + // int32 = 0xe53 (4 bytes) + // int32 = 0x81 (4 bytes) + // int32 = 0x3 (4 bytes) + // int32 = 0x0 (4 bytes) + // int32 = 0x80000001 (4 bytes) + // int32 = 0x5 (4 bytes) + // int32 = 0x54 (4 bytes) + // int32 = 0x401 (4 bytes) + // int32 = 0x9 (4 bytes) + // int32 = 0x3 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0x1 (4 bytes) + // int32 = 0x9 (4 bytes) + // int32 = 0xed (4 bytes) + // int32 = 0x1f (4 bytes) + // int32 = 0x5 (4 bytes) + // int32 = 0xd (4 bytes) + // int32 = 0x8001 (4 bytes) + // int32 = 0xfff (4 bytes) + // int32 = 0x2 (4 bytes) + // int32 = 0x7fffffff (4 bytes) + // int32 = 0xd 
(4 bytes) + // int32 = 0x1 (4 bytes) + // int32 = 0x401 (4 bytes) + // int32 = 0x4 (4 bytes) + // int32 = 0xa043 (4 bytes) + // } + // } + // } + // ] + *(uint32_t*)0x200000000300 = 0xe; + *(uint32_t*)0x200000000304 = 3; + *(uint32_t*)0x200000000308 = 0; + *(uint64_t*)0x200000000310 = 0; + *(uint64_t*)0x200000000318 = 0x81; + *(uint32_t*)0x200000000320 = 4; + *(uint32_t*)0x200000000324 = 5; + *(uint64_t*)0x200000000328 = 6; + *(uint32_t*)0x200000000330 = 4; + *(uint32_t*)0x200000000334 = 1; + *(uint64_t*)0x200000000338 = 0; + *(uint32_t*)0x200000000340 = 8; + *(uint32_t*)0x200000000344 = 5; + *(uint32_t*)0x200000000348 = 6; + *(uint32_t*)0x20000000034c = 0xc3b; + *(uint32_t*)0x200000000350 = 2; + *(uint32_t*)0x200000000354 = 4; + *(uint32_t*)0x200000000358 = 0xa; + *(uint32_t*)0x20000000035c = 0xfffffffe; + *(uint32_t*)0x200000000360 = 2; + *(uint32_t*)0x200000000364 = 0x80; + *(uint32_t*)0x200000000368 = 0xd22; + *(uint32_t*)0x20000000036c = 0xa1a5; + *(uint32_t*)0x200000000370 = 0; + *(uint32_t*)0x200000000374 = 0xfffffff8; + *(uint32_t*)0x200000000378 = 4; + *(uint32_t*)0x20000000037c = -1; + *(uint32_t*)0x200000000380 = 0x100; + *(uint32_t*)0x200000000384 = 4; + *(uint32_t*)0x200000000388 = 8; + *(uint32_t*)0x20000000038c = 0x5b8f6f5f; + *(uint32_t*)0x200000000390 = 9; + *(uint32_t*)0x200000000394 = 0xfffffffb; + *(uint32_t*)0x200000000398 = 2; + *(uint32_t*)0x20000000039c = 3; + *(uint32_t*)0x2000000003a0 = 6; + *(uint32_t*)0x2000000003a4 = 1; + *(uint32_t*)0x2000000003a8 = 0x800; + *(uint32_t*)0x2000000003ac = 0x6b0000; + *(uint32_t*)0x2000000003b0 = 4; + *(uint32_t*)0x2000000003b4 = 4; + *(uint32_t*)0x2000000003b8 = 0x7ff; + *(uint32_t*)0x2000000003bc = 2; + *(uint32_t*)0x2000000003c0 = 7; + *(uint32_t*)0x2000000003c4 = 9; + *(uint32_t*)0x2000000003c8 = 9; + *(uint32_t*)0x2000000003cc = 0x8000; + *(uint32_t*)0x2000000003d0 = 2; + *(uint32_t*)0x2000000003d4 = 0x5be; + *(uint32_t*)0x2000000003d8 = 0xf0000000; + *(uint32_t*)0x2000000003dc = 0x1db; + 
*(uint32_t*)0x2000000003e0 = 3; + *(uint32_t*)0x2000000003e4 = 0; + *(uint32_t*)0x2000000003e8 = 8; + *(uint32_t*)0x2000000003ec = 0x18000000; + *(uint32_t*)0x2000000003f0 = 0xfd6; + *(uint32_t*)0x2000000003f4 = 1; + *(uint32_t*)0x2000000003f8 = 8; + *(uint32_t*)0x2000000003fc = 4; + *(uint32_t*)0x200000000400 = 0; + *(uint32_t*)0x200000000404 = 2; + *(uint32_t*)0x200000000408 = 0xe2; + *(uint32_t*)0x20000000040c = 0; + *(uint32_t*)0x200000000410 = 5; + *(uint32_t*)0x200000000414 = 0x1cd; + *(uint32_t*)0x200000000418 = 0xcf58; + *(uint32_t*)0x20000000041c = 6; + *(uint32_t*)0x200000000420 = 0x2e7; + *(uint32_t*)0x200000000424 = 0x64d; + *(uint32_t*)0x200000000428 = 0x2a4; + *(uint32_t*)0x20000000042c = 7; + *(uint32_t*)0x200000000430 = 6; + *(uint32_t*)0x200000000434 = 8; + *(uint32_t*)0x200000000438 = 9; + *(uint32_t*)0x20000000043c = 7; + *(uint32_t*)0x200000000440 = 6; + *(uint32_t*)0x200000000444 = 9; + *(uint32_t*)0x200000000448 = 2; + *(uint32_t*)0x20000000044c = 0xfffffff8; + *(uint32_t*)0x200000000450 = 5; + *(uint32_t*)0x200000000454 = 0xe53; + *(uint32_t*)0x200000000458 = 0x81; + *(uint32_t*)0x20000000045c = 3; + *(uint32_t*)0x200000000460 = 0; + *(uint32_t*)0x200000000464 = 0x80000001; + *(uint32_t*)0x200000000468 = 5; + *(uint32_t*)0x20000000046c = 0x54; + *(uint32_t*)0x200000000470 = 0x401; + *(uint32_t*)0x200000000474 = 9; + *(uint32_t*)0x200000000478 = 3; + *(uint32_t*)0x20000000047c = 4; + *(uint32_t*)0x200000000480 = 2; + *(uint32_t*)0x200000000484 = 1; + *(uint32_t*)0x200000000488 = 9; + *(uint32_t*)0x20000000048c = 0xed; + *(uint32_t*)0x200000000490 = 0x1f; + *(uint32_t*)0x200000000494 = 5; + *(uint32_t*)0x200000000498 = 0xd; + *(uint32_t*)0x20000000049c = 0x8001; + *(uint32_t*)0x2000000004a0 = 0xfff; + *(uint32_t*)0x2000000004a4 = 2; + *(uint32_t*)0x2000000004a8 = 0x7fffffff; + *(uint32_t*)0x2000000004ac = 0xd; + *(uint32_t*)0x2000000004b0 = 1; + *(uint32_t*)0x2000000004b4 = 0x401; + *(uint32_t*)0x2000000004b8 = 4; + *(uint32_t*)0x2000000004bc = 
0xa043; + syscall(SYS_ioctl, /*fd=*/0xffffff9c, /*cmd=*/0xc1c06d01ul, + /*arg=*/0x200000000300ul); + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_pass_pass_cdevsw + memcpy((void*)0x200000000100, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000100ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[0] = res; + // ioctl\$CAMIOCOMMAND_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0xc4e01a02 (8 bytes) + // arg: ptr[inout, ccb\$pass_cdevsw] { + // union ccb\$pass_cdevsw { + // cqa: ccb_que_ais\$pass_cdevsw { + // ccb_h: ccb_hdr\$pass_cdevsw { + // pinfo: cam_pinfo\$pass_cdevsw { + // priority: int32 = 0x2 (4 bytes) + // generation: int32 = 0x1 (4 bytes) + // index: int32 = 0x2000000 (4 bytes) + // } + // pad = 0x0 (4 bytes) + // xpt_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0xfec (8 bytes) + // priority: int32 = 0xfffffffc (4 bytes) + // pad = 0x0 (4 bytes) + // } + // sim_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x5 (8 bytes) + // priority: int32 = 0x7 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // periph_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x80 (8 bytes) + // priority: int32 = 0x2 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // retry_count: int16 = 0x1 (2 bytes) + // alloc_flags: int16 = 0x6b4 (2 bytes) + // pad = 0x0 (4 bytes) + // cbfcnp: intptr = 0x0 (8 bytes) + // func_code: int32 = 0x8 (4 bytes) + // status: int32 = 0x4 (4 bytes) + // path: intptr = 0xfffffffffffffffc (8 bytes) + // path_id: int32 = 0x9 (4 bytes) + // target_id: int32 = 0x8 (4 bytes) + // target_lun: int64 = 0x7d44 (8 bytes) + // flags: int32 = 0x1 (4 bytes) + // xflags: int32 = 0xfffffff8 (4 bytes) + // periph_priv: 
buffer: {69 32 82 68 e7 3f ef 85 2d 76 56 88 e5 d9 + // 10 17} (length 0x10) sim_priv: buffer: {19 45 5e bb 27 da 45 05 + // 43 c5 32 70 9e cb 83 a1} (length 0x10) qos: buffer: {f6 7d 0f 00 + // 10 00 00 00 00 32 e5 67 b7 bc 75 2d} (length 0x10) timeout: + // int32 = 0x7 (4 bytes) pad = 0x0 (4 bytes) softtimeout: timeval { + // sec: intptr = 0x5 (8 bytes) + // usec: intptr = 0x4 (8 bytes) + // } + // } + // payload: buffer: {f5 6a 42 5c 52 66 05 e3 50 a5 72 71 cd 88 ce 58 + // c0 02 3b 6e 19 28 eb 06 ee d4 11 85 f4 29 8a 46 09 8a 1d be bf 87 + // fb 73 a4 9e 3f 64 4f f0 18 b6 64 8f ab 32 a0 7b 8f 4a ba a5 02 ba + // 96 f8 1d fc} (length 0x40) + // } + // } + // } + // ] + *(uint32_t*)0x200000000240 = 2; + *(uint32_t*)0x200000000244 = 1; + *(uint32_t*)0x200000000248 = 0x2000000; + *(uint64_t*)0x200000000250 = 0xfec; + *(uint32_t*)0x200000000258 = 0xfffffffc; + *(uint64_t*)0x200000000260 = 5; + *(uint32_t*)0x200000000268 = 7; + *(uint64_t*)0x200000000270 = 0x80; + *(uint32_t*)0x200000000278 = 2; + *(uint16_t*)0x200000000280 = 1; + *(uint16_t*)0x200000000282 = 0x6b4; + *(uint64_t*)0x200000000288 = 0; + *(uint32_t*)0x200000000290 = 8; + *(uint32_t*)0x200000000294 = 4; + *(uint64_t*)0x200000000298 = 0xfffffffffffffffc; + *(uint32_t*)0x2000000002a0 = 9; + *(uint32_t*)0x2000000002a4 = 8; + *(uint64_t*)0x2000000002a8 = 0x7d44; + *(uint32_t*)0x2000000002b0 = 1; + *(uint32_t*)0x2000000002b4 = 0xfffffff8; + memcpy((void*)0x2000000002b8, + "\x69\x32\x82\x68\xe7\x3f\xef\x85\x2d\x76\x56\x88\xe5\xd9\x10\x17", + 16); + memcpy((void*)0x2000000002c8, + "\x19\x45\x5e\xbb\x27\xda\x45\x05\x43\xc5\x32\x70\x9e\xcb\x83\xa1", + 16); + memcpy((void*)0x2000000002d8, + "\xf6\x7d\x0f\x00\x10\x00\x00\x00\x00\x32\xe5\x67\xb7\xbc\x75\x2d", + 16); + *(uint32_t*)0x2000000002e8 = 7; + *(uint64_t*)0x2000000002f0 = 5; + *(uint64_t*)0x2000000002f8 = 4; + memcpy((void*)0x200000000300, + "\xf5\x6a\x42\x5c\x52\x66\x05\xe3\x50\xa5\x72\x71\xcd\x88\xce\x58\xc0" + 
"\x02\x3b\x6e\x19\x28\xeb\x06\xee\xd4\x11\x85\xf4\x29\x8a\x46\x09\x8a" + "\x1d\xbe\xbf\x87\xfb\x73\xa4\x9e\x3f\x64\x4f\xf0\x18\xb6\x64\x8f\xab" + "\x32\xa0\x7b\x8f\x4a\xba\xa5\x02\xba\x96\xf8\x1d\xfc", + 64); + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0xc4e01a02ul, + /*arg=*/0x200000000240ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller96.sh b/tools/test/stress2/misc/syzkaller96.sh new file mode 100755 index 000000000000..0cac1a93919f --- /dev/null +++ b/tools/test/stress2/misc/syzkaller96.sh @@ -0,0 +1,162 @@ +#!/bin/sh + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# Bug 293898 - panic: AUX register unsupported + +# No problems seen. + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. ../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns 
fd_pass_pass_cdevsw + memcpy((void*)0x200000000100, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000100ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[0] = res; + // ioctl\$CAMIOCOMMAND_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0xc4e01a02 (8 bytes) + // arg: ptr[inout, ccb\$pass_cdevsw] { + // union ccb\$pass_cdevsw { + // cqa: ccb_que_ais\$pass_cdevsw { + // ccb_h: ccb_hdr\$pass_cdevsw { + // pinfo: cam_pinfo\$pass_cdevsw { + // priority: int32 = 0x0 (4 bytes) + // generation: int32 = 0x3 (4 bytes) + // index: int32 = 0x2000000 (4 bytes) + // } + // pad = 0x0 (4 bytes) + // xpt_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0xfea (8 bytes) + // priority: int32 = 0xfffffffb (4 bytes) + // pad = 0x0 (4 bytes) + // } + // sim_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x2 (8 bytes) + // priority: int32 = 0x6 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // periph_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x83 (8 bytes) + // priority: int32 = 0xd (4 bytes) + // pad = 0x0 (4 bytes) + // } + // retry_count: int16 = 0x1 (2 bytes) + // alloc_flags: int16 = 0x6b4 (2 bytes) + // pad = 0x0 (4 bytes) + // cbfcnp: intptr = 0x0 (8 bytes) + // func_code: int32 = 0x918 (4 bytes) + // status: int32 = 0x4 (4 bytes) + // path: intptr = 0xfffffffffffffffc (8 bytes) + // path_id: int32 = 0x9 (4 bytes) + // target_id: int32 = 0x8 (4 bytes) + // target_lun: int64 = 0x7d44 (8 bytes) + // flags: int32 = 0x1 (4 bytes) + // xflags: int32 = 0xfffffff8 (4 bytes) + // periph_priv: buffer: {69 32 82 68 e7 3f ef 85 2d 76 56 88 e5 d9 + // 10 17} (length 0x10) sim_priv: buffer: {00 00 00 00 00 00 00 00 + // 00 00 00 00 00 00 80 00} (length 0x10) qos: buffer: {f6 7d 0f 00 + // 10 00 00 00 00 32 e5 67 b7 bc 75 2d} (length 0x10) timeout: + // int32 = 0xffffffff (4 bytes) pad = 0x0 (4 bytes) softtimeout: + // timeval { + // sec: intptr = 0x5 (8 bytes) + // 
usec: intptr = 0x4 (8 bytes) + // } + // } + // payload: buffer: {f5 6a 42 5c 52 f4 74 e3 39 a5 05 00 00 00 ce 58 + // c0 19 28 cb 06 ee d4 11 85 f4 29 8a 46 09 8a 1d be bf 87 fb 73 a4 + // 9e 3f 64 4f f0 18 b6 64 8f ab 00 00 00 00 00 00 00 00 00 00 00 00 + // 00 00 00 00} (length 0x40) + // } + // } + // } + // ] + *(uint32_t*)0x200000000000 = 0; + *(uint32_t*)0x200000000004 = 3; + *(uint32_t*)0x200000000008 = 0x2000000; + *(uint64_t*)0x200000000010 = 0xfea; + *(uint32_t*)0x200000000018 = 0xfffffffb; + *(uint64_t*)0x200000000020 = 2; + *(uint32_t*)0x200000000028 = 6; + *(uint64_t*)0x200000000030 = 0x83; + *(uint32_t*)0x200000000038 = 0xd; + *(uint16_t*)0x200000000040 = 1; + *(uint16_t*)0x200000000042 = 0x6b4; + *(uint64_t*)0x200000000048 = 0; + *(uint32_t*)0x200000000050 = 0x918; + *(uint32_t*)0x200000000054 = 4; + *(uint64_t*)0x200000000058 = 0xfffffffffffffffc; + *(uint32_t*)0x200000000060 = 9; + *(uint32_t*)0x200000000064 = 8; + *(uint64_t*)0x200000000068 = 0x7d44; + *(uint32_t*)0x200000000070 = 1; + *(uint32_t*)0x200000000074 = 0xfffffff8; + memcpy((void*)0x200000000078, + "\x69\x32\x82\x68\xe7\x3f\xef\x85\x2d\x76\x56\x88\xe5\xd9\x10\x17", + 16); + memcpy((void*)0x200000000088, + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00", + 16); + memcpy((void*)0x200000000098, + "\xf6\x7d\x0f\x00\x10\x00\x00\x00\x00\x32\xe5\x67\xb7\xbc\x75\x2d", + 16); + *(uint32_t*)0x2000000000a8 = -1; + *(uint64_t*)0x2000000000b0 = 5; + *(uint64_t*)0x2000000000b8 = 4; + memcpy((void*)0x2000000000c0, + "\xf5\x6a\x42\x5c\x52\xf4\x74\xe3\x39\xa5\x05\x00\x00\x00\xce\x58\xc0" + "\x19\x28\xcb\x06\xee\xd4\x11\x85\xf4\x29\x8a\x46\x09\x8a\x1d\xbe\xbf" + "\x87\xfb\x73\xa4\x9e\x3f\x64\x4f\xf0\x18\xb6\x64\x8f\xab\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + 64); + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0xc4e01a02ul, + /*arg=*/0x200000000000ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > 
/dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller97.sh b/tools/test/stress2/misc/syzkaller97.sh new file mode 100755 index 000000000000..a7ad25d965f4 --- /dev/null +++ b/tools/test/stress2/misc/syzkaller97.sh @@ -0,0 +1,194 @@ +#!/bin/sh + +# panic: cam_periph_ccbwait: proceeding with incomplete ccb: ccb=0xfffff80006171800, func_code=0x3, status=0, index=-1 +# cpuid = 3 +# time = 1773850497 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe01001de7e0 +# vpanic() at vpanic+0x136/frame 0xfffffe01001de910 +# panic() at panic+0x43/frame 0xfffffe01001de970 +# cam_periph_runccb() at cam_periph_runccb+0x2ec/frame 0xfffffe01001deac0 +# passsendccb() at passsendccb+0x160/frame 0xfffffe01001deb30 +# passdoioctl() at passdoioctl+0x3a1/frame 0xfffffe01001deb80 +# passioctl() at passioctl+0x22/frame 0xfffffe01001debc0 +# devfs_ioctl() at devfs_ioctl+0xd1/frame 0xfffffe01001dec10 +# VOP_IOCTL_APV() at VOP_IOCTL_APV+0x51/frame 0xfffffe01001dec40 +# vn_ioctl() at vn_ioctl+0x160/frame 0xfffffe01001decb0 +# devfs_ioctl_f() at devfs_ioctl_f+0x1e/frame 0xfffffe01001decd0 +# kern_ioctl() at kern_ioctl+0x2a1/frame 0xfffffe01001ded40 +# sys_ioctl() at sys_ioctl+0x12f/frame 0xfffffe01001dee00 +# amd64_syscall() at amd64_syscall+0x169/frame 0xfffffe01001def30 +# fast_syscall_common() at fast_syscall_common+0xf8/frame 0xfffffe01001def30 +# --- syscall (0, FreeBSD ELF64, syscall), rip = 0x823b1eeca, rsp = 0x820adb1c8, rbp = 0x820adb1f0 --- +# KDB: enter: panic +# [ thread pid 4950 tid 100344 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# Bug 293899 - panic: cam_periph_ccbwait: proceeding with incomplete ccb: 
ccb=ADDR, func_code=0x3, status=NUM, index=-NUM + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. ../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // openat\$pass_pass_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 61 73 73 30 00} (length 0xb) + // } + // flags: open_flags = 0x2 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_pass_pass_cdevsw + memcpy((void*)0x200000000100, "/dev/pass0\000", 11); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000100ul, /*flags=O_RDWR*/ 2, /*mode=*/0); + if (res != -1) + r[0] = res; + // ioctl\$CAMIOCOMMAND_pass_cdevsw arguments: [ + // fd: fd_pass_pass_cdevsw (resource) + // cmd: const = 0xc4e01a02 (8 bytes) + // arg: ptr[inout, ccb\$pass_cdevsw] { + // union ccb\$pass_cdevsw { + // cqc: ccb_query_config\$pass_cdevsw { + // ccb_h: ccb_hdr\$pass_cdevsw { + // pinfo: cam_pinfo\$pass_cdevsw { + // priority: int32 = 0x7 (4 bytes) + // generation: int32 = 0x8 (4 bytes) + // index: int32 = 0x4 (4 bytes) + // } + // pad = 0x0 (4 bytes) + // xpt_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0xfffffffffffffffe (8 bytes) + // priority: int32 = 0xd (4 
bytes) + // pad = 0x0 (4 bytes) + // } + // sim_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x1000 (8 bytes) + // priority: int32 = 0x7fff (4 bytes) + // pad = 0x0 (4 bytes) + // } + // periph_links: camq_entry\$pass_cdevsw { + // links_next: intptr = 0x100000001 (8 bytes) + // priority: int32 = 0x3 (4 bytes) + // pad = 0x0 (4 bytes) + // } + // retry_count: int16 = 0x8 (2 bytes) + // alloc_flags: int16 = 0x84ce (2 bytes) + // pad = 0x0 (4 bytes) + // cbfcnp: intptr = 0xffffffff (8 bytes) + // func_code: int32 = 0x3 (4 bytes) + // status: int32 = 0x6 (4 bytes) + // path: intptr = 0x8000000000000001 (8 bytes) + // path_id: int32 = 0x3 (4 bytes) + // target_id: int32 = 0x800 (4 bytes) + // target_lun: int64 = 0x12 (8 bytes) + // flags: int32 = 0x5 (4 bytes) + // xflags: int32 = 0x8 (4 bytes) + // periph_priv: buffer: {ff 00 fc 8b be 26 59 c1 e3 be e5 97 9a b9 + // a8 da} (length 0x10) sim_priv: buffer: {bc 62 8a da 83 8f 2b 49 + // f1 67 50 3f 43 71 98 c8} (length 0x10) qos: buffer: {5e 98 6e af + // a2 b9 ac 4a 3a d1 ed 97 4e f6 f6 e2} (length 0x10) timeout: + // int32 = 0x8 (4 bytes) pad = 0x0 (4 bytes) softtimeout: timeval { + // sec: intptr = 0x4 (8 bytes) + // usec: intptr = 0x1 (8 bytes) + // } + // } + // payload: buffer: {ac f7 a5 7c b5 71 08 e5 db bd f4 df d0 16 4a 33 + // 68 b1 76 63 b8 c0 6b b7 31 4e 7d 97 28 be ee e6 5b 35 e8 8a cf a8 + // 49 62 11 9b 25 b5 fc 67 8f ef a1 44 b2 e5 a7 9b 5a 06 34 ae a0 56 + // fe 95 69 61 27 4a ba aa 92 e2 b9 ea 97 e6 1c cf 24 6b 8e 8f f7 b7 + // c8 3a cf b7 97 c8 32 12 f1 4d bc 0b 8b ef 30 11 62 5d f1 0f af c2 + // 67 76 65 be 11 2e 10 5f 65 70 58 e2 3b c2 91 99 3b 2e 00 00 00 00 + // 00 00} (length 0x80) + // } + // } + // } + // ] + *(uint32_t*)0x200000000ec0 = 7; + *(uint32_t*)0x200000000ec4 = 8; + *(uint32_t*)0x200000000ec8 = 4; + *(uint64_t*)0x200000000ed0 = 0xfffffffffffffffe; + *(uint32_t*)0x200000000ed8 = 0xd; + *(uint64_t*)0x200000000ee0 = 0x1000; + *(uint32_t*)0x200000000ee8 = 0x7fff; + 
*(uint64_t*)0x200000000ef0 = 0x100000001; + *(uint32_t*)0x200000000ef8 = 3; + *(uint16_t*)0x200000000f00 = 8; + *(uint16_t*)0x200000000f02 = 0x84ce; + *(uint64_t*)0x200000000f08 = 0xffffffff; + *(uint32_t*)0x200000000f10 = 3; + *(uint32_t*)0x200000000f14 = 6; + *(uint64_t*)0x200000000f18 = 0x8000000000000001; + *(uint32_t*)0x200000000f20 = 3; + *(uint32_t*)0x200000000f24 = 0x800; + *(uint64_t*)0x200000000f28 = 0x12; + *(uint32_t*)0x200000000f30 = 5; + *(uint32_t*)0x200000000f34 = 8; + memcpy((void*)0x200000000f38, + "\xff\x00\xfc\x8b\xbe\x26\x59\xc1\xe3\xbe\xe5\x97\x9a\xb9\xa8\xda", + 16); + memcpy((void*)0x200000000f48, + "\xbc\x62\x8a\xda\x83\x8f\x2b\x49\xf1\x67\x50\x3f\x43\x71\x98\xc8", + 16); + memcpy((void*)0x200000000f58, + "\x5e\x98\x6e\xaf\xa2\xb9\xac\x4a\x3a\xd1\xed\x97\x4e\xf6\xf6\xe2", + 16); + *(uint32_t*)0x200000000f68 = 8; + *(uint64_t*)0x200000000f70 = 4; + *(uint64_t*)0x200000000f78 = 1; + memcpy((void*)0x200000000f80, + "\xac\xf7\xa5\x7c\xb5\x71\x08\xe5\xdb\xbd\xf4\xdf\xd0\x16\x4a\x33\x68" + "\xb1\x76\x63\xb8\xc0\x6b\xb7\x31\x4e\x7d\x97\x28\xbe\xee\xe6\x5b\x35" + "\xe8\x8a\xcf\xa8\x49\x62\x11\x9b\x25\xb5\xfc\x67\x8f\xef\xa1\x44\xb2" + "\xe5\xa7\x9b\x5a\x06\x34\xae\xa0\x56\xfe\x95\x69\x61\x27\x4a\xba\xaa" + "\x92\xe2\xb9\xea\x97\xe6\x1c\xcf\x24\x6b\x8e\x8f\xf7\xb7\xc8\x3a\xcf" + "\xb7\x97\xc8\x32\x12\xf1\x4d\xbc\x0b\x8b\xef\x30\x11\x62\x5d\xf1\x0f" + "\xaf\xc2\x67\x76\x65\xbe\x11\x2e\x10\x5f\x65\x70\x58\xe2\x3b\xc2\x91" + "\x99\x3b\x2e\x00\x00\x00\x00\x00\x00", + 128); + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0xc4e01a02ul, + /*arg=*/0x200000000ec0ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller98.sh b/tools/test/stress2/misc/syzkaller98.sh new file mode 100755 index 000000000000..f74d00e0934d --- /dev/null +++ b/tools/test/stress2/misc/syzkaller98.sh @@ -0,0 +1,268 @@ 
+#!/bin/sh + +# 806.906239 [ 653] nm_os_extmem_delete freeing 1000000 bytes +# panic: dst_m 0xfffffe00130fd920 is not wired +# cpuid = 7 +# time = 1773855806 +# KDB: stack backtrace: +# db_trace_self_wrapper() at db_trace_self_wrapper+0x2b/frame 0xfffffe010022eb00 +# vpanic() at vpanic+0x136/frame 0xfffffe010022ec30 +# panic() at panic+0x43/frame 0xfffffe010022ec90 +# vm_fault_copy_entry() at vm_fault_copy_entry+0x54e/frame 0xfffffe010022ed60 +# vm_map_protect() at vm_map_protect+0x714/frame 0xfffffe010022edf0 +# sys_mprotect() at sys_mprotect+0x9f/frame 0xfffffe010022ee00 +# amd64_syscall() at amd64_syscall+0x169/frame 0xfffffe010022ef30 +# fast_syscall_common() at fast_syscall_common+0xf8/frame 0xfffffe010022ef30 +# --- syscall (0, FreeBSD ELF64, syscall), rip = 0x822fa9eca, rsp = 0x820c270e8, rbp = 0x820c27110 --- +# KDB: enter: panic +# [ thread pid 4510 tid 100369 ] +# Stopped at kdb_enter+0x33: movq $0,0x15e9d32(%rip) +# db> x/s version +# version: FreeBSD 16.0-CURRENT #0 main-n284537-a8b9a05d3cad-dirty: Tue Mar 17 09:39:44 CET 2026 +# pho@mercat1.netperf.freebsd.org:/usr/src/sys/amd64/compile/PHO +# db> + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# Bug 293900 - panic: dst_m ADDR is not wired + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. 
../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // mlock arguments: [ + // addr: VMA[0x1000] + // size: len = 0x1000 (8 bytes) + // ] + syscall(SYS_mlock, /*addr=*/0x200000ffc000ul, /*size=*/0x1000ul); + // munlock arguments: [ + // addr: VMA[0x4000] + // size: len = 0x4000 (8 bytes) + // ] + syscall(SYS_munlock, /*addr=*/0x200000ff9000ul, /*size=*/0x4000ul); + // fork arguments: [ + // ] + // returns pid + syscall(SYS_fork); + // mkdir arguments: [ + // path: ptr[in, buffer] { + // buffer: {2e 2f 66 69 6c 65 30 00} (length 0x8) + // } + // mode: open_mode = 0x109 (8 bytes) + // ] + memcpy((void*)0x200000000300, "./file0\000", 8); + syscall(SYS_mkdir, /*path=*/0x200000000300ul, + /*mode=S_IXOTH|S_IXGRP|S_IRUSR*/ 0x109ul); + // mprotect arguments: [ + // addr: VMA[0x4000] + // len: len = 0x4000 (8 bytes) + // prot: mmap_prot = 0x4 (8 bytes) + // ] + syscall(SYS_mprotect, /*addr=*/0x200000ffc000ul, /*len=*/0x4000ul, + /*prot=PROT_EXEC*/ 4ul); + // mlock arguments: [ + // addr: VMA[0x4000] + // size: len = 0x4000 (8 bytes) + // ] + syscall(SYS_mlock, /*addr=*/0x200000ffb000ul, /*size=*/0x4000ul); + // mount\$nfs_newnfs_vnodeops_nosig arguments: [ + // fstype: nil + // dir: nil + // mnt_flags: mount_flags = 0x58000000 (4 bytes) + 
// data: ptr[in, nfs_args\$newnfs_vnodeops_nosig] { + // nfs_args\$newnfs_vnodeops_nosig { + // version: const = 0x3 (4 bytes) + // pad = 0x0 (4 bytes) + // addr: nil + // addrlen: len = 0x0 (4 bytes) + // sotype: sock_type_newnfs_vnodeops_nosig = 0x4 (4 bytes) + // proto: int32 = 0xb (4 bytes) + // pad = 0x0 (4 bytes) + // fh: nil + // fhsize: len = 0x0 (4 bytes) + // nfs_flags: nfs_mount_flags_newnfs_vnodeops_nosig = 0x8 (4 bytes) + // wsize: int32 = 0x5 (4 bytes) + // rsize: int32 = 0x6 (4 bytes) + // readdirsize: int32 = 0x0 (4 bytes) + // timeo: int32 = 0x8 (4 bytes) + // retrans: int32 = 0x1 (4 bytes) + // maxgrouplist: int32 = 0x4 (4 bytes) + // readahead: int32 = 0x800 (4 bytes) + // wcommitsize: int32 = 0x4 (4 bytes) + // deadthresh: int32 = 0x200 (4 bytes) + // pad = 0x0 (4 bytes) + // hostname: nil + // acregmin: int32 = 0x80 (4 bytes) + // acregmax: int32 = 0x1 (4 bytes) + // acdirmin: int32 = 0x2 (4 bytes) + // acdirmax: int32 = 0xa92 (4 bytes) + // } + // } + // ] + *(uint32_t*)0x200000000240 = 3; + *(uint64_t*)0x200000000248 = 0; + *(uint32_t*)0x200000000250 = 0; + *(uint32_t*)0x200000000254 = 4; + *(uint32_t*)0x200000000258 = 0xb; + *(uint64_t*)0x200000000260 = 0; + *(uint32_t*)0x200000000268 = 0; + *(uint32_t*)0x20000000026c = 8; + *(uint32_t*)0x200000000270 = 5; + *(uint32_t*)0x200000000274 = 6; + *(uint32_t*)0x200000000278 = 0; + *(uint32_t*)0x20000000027c = 8; + *(uint32_t*)0x200000000280 = 1; + *(uint32_t*)0x200000000284 = 4; + *(uint32_t*)0x200000000288 = 0x800; + *(uint32_t*)0x20000000028c = 4; + *(uint32_t*)0x200000000290 = 0x200; + *(uint64_t*)0x200000000298 = 0; + *(uint32_t*)0x2000000002a0 = 0x80; + *(uint32_t*)0x2000000002a4 = 1; + *(uint32_t*)0x2000000002a8 = 2; + *(uint32_t*)0x2000000002ac = 0xa92; + syscall(SYS_mount, /*fstype=*/0ul, /*dir=*/0ul, + /*mnt_flags=MNT_ACLS|MNT_NOCLUSTERR|MNT_NOATIME*/ 0x58000000, + /*data=*/0x200000000240ul); + // openat\$netmap_netmap_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // 
file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 6e 65 74 6d 61 70 00} (length 0xc) + // } + // flags: open_flags = 0x8 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_netmap_netmap_cdevsw + memcpy((void*)0x200000000080, "/dev/netmap\000", 12); + res = syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, + /*file=*/0x200000000080ul, /*flags=O_APPEND*/ 8, /*mode=*/0); + if (res != -1) + r[0] = res; + // ioctl\$NIOCCTRL_netmap_cdevsw arguments: [ + // fd: fd_netmap_netmap_cdevsw (resource) + // cmd: const = 0xc0586997 (8 bytes) + // arg: ptr[inout, nmreq_header\$netmap_cdevsw] { + // nmreq_header\$netmap_cdevsw { + // nr_version: const = 0xe (2 bytes) + // nr_reqtype: netmap_req_types_netmap_cdevsw = 0x1 (2 bytes) + // nr_reserved: const = 0x0 (4 bytes) + // nr_name: buffer: {fd dc df f0 57 4f 3c 7c e4 5f 8c a0 60 dd 3e f8 85 + // 76 39 53 90 de 06 ef fd a7 de 31 18 a2 d4 3a c7 d3 2a a5 0a c1 17 23 + // 6a fe eb 89 29 84 f2 62 d2 83 53 b7 67 c7 b2 ee 8c 39 68 f1 3f 73 52 + // b4} (length 0x40) nr_options: ptr[inout, + // nmreq_option_types\$netmap_cdevsw] { + // union nmreq_option_types\$netmap_cdevsw { + // kloop_fds: nmreq_opt_sync_kloop_eventfds\$netmap_cdevsw { + // nro_next: ptr[in, nmreq_option_types\$netmap_cdevsw] { + // union nmreq_option_types\$netmap_cdevsw { + // extmem: nmreq_opt_extmem\$netmap_cdevsw { + // nro_next: nil + // nro_reqtype: const = 0x1 (4 bytes) + // nro_status: int32 = 0x80000000 (4 bytes) + // nro_size: len = 0x28 (8 bytes) + // nro_usrptr: VMA[0x3000] + // nro_info: int32 = 0x100000 (4 bytes) + // nro_size_ptr: len = 0x3000 (4 bytes) + // } + // } + // } + // nro_reqtype: const = 0x2 (4 bytes) + // nro_status: int32 = 0x0 (4 bytes) + // nro_size: len = 0x18 (8 bytes) + // eventfds: + // array[nmreq_opt_sync_kloop_eventfd_pair\$netmap_cdevsw] { + // } + // } + // } + // } + // nr_body: ptr[inout, nmreq_body\$netmap_cdevsw] { + // union nmreq_body\$netmap_cdevsw { + // reg: nmreq_register\$netmap_cdevsw { + // nr_name: 
buffer: {c4 c0 99 4a 5e 6e 71 96 98 b6 cc 78 3c 37 aa + // 7c} (length 0x10) nr_mode: nmreq_register_mode_netmap_cdevsw = + // 0x1 (4 bytes) nr_ringid: nmreq_register_ringid_netmap_cdevsw = + // 0x2000 (4 bytes) nr_flags: nmreq_register_flags_netmap_cdevsw + // = 0x0 (4 bytes) nr_mem_id: int32 = 0x9 (4 bytes) nr_spare: + // buffer: {00 00 00 00 00 00 00 00} (length 0x8) + // } + // } + // } + // } + // } + // ] + *(uint16_t*)0x200000000140 = 0xe; + *(uint16_t*)0x200000000142 = 1; + *(uint32_t*)0x200000000144 = 0; + memcpy((void*)0x200000000148, + "\xfd\xdc\xdf\xf0\x57\x4f\x3c\x7c\xe4\x5f\x8c\xa0\x60\xdd\x3e\xf8\x85" + "\x76\x39\x53\x90\xde\x06\xef\xfd\xa7\xde\x31\x18\xa2\xd4\x3a\xc7\xd3" + "\x2a\xa5\x0a\xc1\x17\x23\x6a\xfe\xeb\x89\x29\x84\xf2\x62\xd2\x83\x53" + "\xb7\x67\xc7\xb2\xee\x8c\x39\x68\xf1\x3f\x73\x52\xb4", + 64); + *(uint64_t*)0x200000000188 = 0x200000000340; + *(uint64_t*)0x200000000340 = 0x200000000240; + *(uint64_t*)0x200000000240 = 0; + *(uint32_t*)0x200000000248 = 1; + *(uint32_t*)0x20000000024c = 0x80000000; + *(uint64_t*)0x200000000250 = 0x28; + *(uint64_t*)0x200000000258 = 0x200000ffa000; + *(uint32_t*)0x200000000260 = 0x100000; + *(uint32_t*)0x200000000264 = 0x3000; + *(uint32_t*)0x200000000348 = 2; + *(uint64_t*)0x200000000350 = 0x18; + *(uint64_t*)0x200000000190 = 0x200000000040; + memcpy((void*)0x200000000040, + "\xc4\xc0\x99\x4a\x5e\x6e\x71\x96\x98\xb6\xcc\x78\x3c\x37\xaa\x7c", + 16); + *(uint32_t*)0x200000000050 = 1; + *(uint32_t*)0x200000000054 = 0x2000; + *(uint32_t*)0x200000000058 = 0; + *(uint32_t*)0x20000000005c = 9; + memset((void*)0x200000000060, 0, 8); + syscall(SYS_ioctl, /*fd=*/r[0], /*cmd=*/0xc0586997ul, + /*arg=*/0x200000000140ul); + // mprotect arguments: [ + // addr: VMA[0x4000] + // len: len = 0x4000 (8 bytes) + // prot: mmap_prot = 0x6 (8 bytes) + // ] + syscall(SYS_mprotect, /*addr=*/0x200000ffc000ul, /*len=*/0x4000ul, + /*prot=PROT_WRITE|PROT_EXEC*/ 6ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 
/tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/tools/test/stress2/misc/syzkaller99.sh b/tools/test/stress2/misc/syzkaller99.sh new file mode 100755 index 000000000000..290889e39e2f --- /dev/null +++ b/tools/test/stress2/misc/syzkaller99.sh @@ -0,0 +1,143 @@ +#!/bin/sh + +# Reproducer obtained from: Jiaming Zhang <r772577952@gmail.com> +# Bug 293901 - panic: mutex ACPI global lock owned at ../../../kern/kern_event.c:LINE + +# No problems seen. + +[ `id -u ` -ne 0 ] && echo "Must be root!" && exit 1 + +. ../default.cfg +set -u +prog=$(basename "$0" .sh) +cat > /tmp/$prog.c <<EOF +// autogenerated by syzkaller (https://github.com/google/syzkaller) + +#define _GNU_SOURCE + +#include <pwd.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/endian.h> +#include <sys/syscall.h> +#include <unistd.h> + +uint64_t r[1] = {0xffffffffffffffff}; + +int main(void) +{ + syscall(SYS_mmap, /*addr=*/0x200000000000ul, /*len=*/0x1000000ul, + /*prot=PROT_WRITE|PROT_READ|PROT_EXEC*/ 7ul, + /*flags=MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE*/ 0x1012ul, + /*fd=*/(intptr_t)-1, /*offset=*/0ul); + const char* reason; + (void)reason; + intptr_t res = 0; + if (write(1, "executing program\n", sizeof("executing program\n") - 1)) { + } + // openat\$bpf arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 62 70 66 00} (length 0x9) + // } + // flags: open_flags = 0x80000 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_bpf + memcpy((void*)0x200000000000, "/dev/bpf\000", 9); + syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, /*file=*/0x200000000000ul, + /*flags=O_TTY_INIT*/ 0x80000, /*mode=*/0); + // openat\$consolectl_consolectl_devsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 
63 6f 6e 73 6f 6c 65 63 74 6c 00} (length + // 0x10) + // } + // flags: open_flags = 0x400000 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd + memcpy((void*)0x200000000740, "/dev/consolectl\000", 16); + syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, /*file=*/0x200000000740ul, + /*flags=O_PATH*/ 0x400000, /*mode=*/0); + // openat\$pvclock_pvclock_cdev_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // path: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 70 76 63 6c 6f 63 6b 00} (length 0xd) + // } + // flags: open_flags = 0x400000 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd + memcpy((void*)0x200000000d00, "/dev/pvclock\000", 13); + syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, /*path=*/0x200000000d00ul, + /*flags=O_PATH*/ 0x400000, /*mode=*/0); + // openat\$apm_apm_cdevsw arguments: [ + // fd: const = 0xffffffffffffff9c (8 bytes) + // file: ptr[in, buffer] { + // buffer: {2f 64 65 76 2f 61 70 6d 00} (length 0x9) + // } + // flags: open_flags = 0x2000000 (4 bytes) + // mode: const = 0x0 (4 bytes) + // ] + // returns fd_apm_apm_cdevsw + memcpy((void*)0x200000000b40, "/dev/apm\000", 9); + syscall(SYS_openat, /*fd=*/0xffffffffffffff9cul, /*file=*/0x200000000b40ul, + /*flags=O_EMPTY_PATH*/ 0x2000000, /*mode=*/0); + // kqueue arguments: [ + // ] + // returns kqueue + res = syscall(SYS_kqueue); + if (res != -1) + r[0] = res; + // kevent arguments: [ + // kqueue: kqueue (resource) + // changelist: ptr[in, array[kevent]] { + // array[kevent] { + // kevent { + // ident: intptr = 0x6 (8 bytes) + // filter: filters = 0xfffffffffffffff3 (2 bytes) + // flags: evflags = 0x4035 (2 bytes) + // fflags: fflags = 0x0 (4 bytes) + // data: int64 = 0x5 (8 bytes) + // udata: intptr = 0x40000000007 (8 bytes) + // ext: array[int64] { + // int64 = 0x4 (8 bytes) + // int64 = 0x100000000 (8 bytes) + // int64 = 0x4 (8 bytes) + // int64 = 0x5 (8 bytes) + // } + // } + // } + // } + // nchanges: len = 0x1 (8 bytes) + // eventlist: 
nil + // nevents: len = 0x0 (8 bytes) + // timeout: nil + // ] + *(uint64_t*)0x200000000400 = 6; + *(uint16_t*)0x200000000408 = 0xfff3; + *(uint16_t*)0x20000000040a = 0x4035; + *(uint32_t*)0x20000000040c = 0; + *(uint64_t*)0x200000000410 = 5; + *(uint64_t*)0x200000000418 = 0x40000000007; + *(uint64_t*)0x200000000420 = 4; + *(uint64_t*)0x200000000428 = 0x100000000; + *(uint64_t*)0x200000000430 = 4; + *(uint64_t*)0x200000000438 = 5; + syscall(SYS_kevent, /*kqueue=*/r[0], /*changelist=*/0x200000000400ul, + /*nchanges=*/1ul, /*eventlist=*/0ul, /*nevents=*/0ul, + /*timeout=*/0ul); + return 0; +} +EOF +mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c || exit 1 + +timeout 3m /tmp/$prog > /dev/null 2>&1 + +rm -rf /tmp/$prog /tmp/$prog.c /tmp/$prog.core +exit 0 diff --git a/usr.bin/diff/diffreg.c b/usr.bin/diff/diffreg.c index 8dcf55a7190b..5c1147bf2730 100644 --- a/usr.bin/diff/diffreg.c +++ b/usr.bin/diff/diffreg.c @@ -542,6 +542,11 @@ opentemp(const char *f) return (NULL); } } + if (nread == -1) { + close(ifd); + close(ofd); + return (NULL); + } close(ifd); lseek(ofd, (off_t)0, SEEK_SET); return (fdopen(ofd, "r")); diff --git a/usr.bin/diff/pr.c b/usr.bin/diff/pr.c index 189e6b34649e..e8a4162d8b18 100644 --- a/usr.bin/diff/pr.c +++ b/usr.bin/diff/pr.c @@ -29,8 +29,10 @@ #include <err.h> #include <errno.h> +#include <fcntl.h> #include <paths.h> #include <signal.h> +#include <spawn.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> @@ -41,6 +43,8 @@ #define _PATH_PR "/usr/bin/pr" +extern char **environ; + struct pr * start_pr(char *file1, char *file2) { @@ -48,40 +52,49 @@ start_pr(char *file1, char *file2) pid_t pid; char *header; struct pr *pr; + posix_spawn_file_actions_t fa; + posix_spawnattr_t sa; + int error; pr = xcalloc(1, sizeof(*pr)); xasprintf(&header, "%s %s %s", diffargs, file1, file2); signal(SIGPIPE, SIG_IGN); fflush(stdout); - if (pipe(pfd) == -1) + if (pipe2(pfd, O_CLOEXEC) == -1) err(2, "pipe"); - switch ((pid = pdfork(&pr->procd, 
PD_CLOEXEC))) { - case -1: - err(2, "No more processes"); - case 0: - /* child */ - if (pfd[0] != STDIN_FILENO) { - dup2(pfd[0], STDIN_FILENO); - close(pfd[0]); + + if ((error = posix_spawnattr_init(&sa)) != 0) + errc(2, error, "posix_spawnattr_init"); + if ((error = posix_spawn_file_actions_init(&fa)) != 0) + errc(2, error, "posix_spawn_file_actions_init"); + + posix_spawnattr_setprocdescp_np(&sa, &pr->procd, 0); + + if (pfd[0] != STDIN_FILENO) + posix_spawn_file_actions_adddup2(&fa, pfd[0], STDIN_FILENO); + + char *argv[] = { __DECONST(char *, _PATH_PR), + __DECONST(char *, "-h"), header, NULL }; + error = posix_spawn(&pid, _PATH_PR, &fa, &sa, argv, environ); + if (error != 0) + errc(2, error, "could not spawn pr"); + + posix_spawn_file_actions_destroy(&fa); + posix_spawnattr_destroy(&sa); + + /* parent */ + if (pfd[1] == STDOUT_FILENO) { + pr->ostdout = STDOUT_FILENO; + } else { + if ((pr->ostdout = dup(STDOUT_FILENO)) < 0 || + dup2(pfd[1], STDOUT_FILENO) < 0) { + err(2, "stdout"); } close(pfd[1]); - execl(_PATH_PR, _PATH_PR, "-h", header, (char *)0); - _exit(127); - default: - /* parent */ - if (pfd[1] == STDOUT_FILENO) { - pr->ostdout = STDOUT_FILENO; - } else { - if ((pr->ostdout = dup(STDOUT_FILENO)) < 0 || - dup2(pfd[1], STDOUT_FILENO) < 0) { - err(2, "stdout"); - } - close(pfd[1]); - } - close(pfd[0]); - free(header); } + close(pfd[0]); + free(header); return (pr); } diff --git a/usr.bin/diff3/diff3.c b/usr.bin/diff3/diff3.c index d85a5da94b10..bbef1f0d21a5 100644 --- a/usr.bin/diff3/diff3.c +++ b/usr.bin/diff3/diff3.c @@ -73,14 +73,17 @@ #include <capsicum_helpers.h> #include <ctype.h> #include <err.h> +#include <fcntl.h> #include <getopt.h> #include <inttypes.h> #include <limits.h> +#include <spawn.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> +extern char **environ; /* * "from" is first in range of changed lines; "to" is last+1 * from=to=line after point of insertion for added lines. 
@@ -301,23 +304,27 @@ readin(int fd, struct diff **dd) } static int -diffexec(const char *diffprog, char **diffargv, int fd[]) +diffexec(char **diffargv, int fd[]) { - int pd; - - switch (pdfork(&pd, PD_CLOEXEC)) { - case 0: - close(fd[0]); - if (dup2(fd[1], STDOUT_FILENO) == -1) - err(2, "child could not duplicate descriptor"); - close(fd[1]); - execvp(diffprog, diffargv); - err(2, "could not execute diff: %s", diffprog); - break; - case -1: - err(2, "could not fork"); - break; - } + posix_spawnattr_t sa; + posix_spawn_file_actions_t fa; + pid_t pid; + int pd, error; + + if ((error = posix_spawnattr_init(&sa)) != 0) + errc(2, error, "posix_spawnattr_init"); + if ((error = posix_spawn_file_actions_init(&fa)) != 0) + errc(2, error, "posix_spawn_file_actions_init"); + + posix_spawnattr_setprocdescp_np(&sa, &pd, 0); + posix_spawn_file_actions_adddup2(&fa, fd[1], STDOUT_FILENO); + + error = posix_spawn(&pid, diffargv[0], &fa, &sa, diffargv, environ); + if (error != 0) + errc(2, error, "could not spawn diff"); + + posix_spawn_file_actions_destroy(&fa); + posix_spawnattr_destroy(&sa); close(fd[1]); return (pd); } @@ -937,6 +944,7 @@ wait_and_check(int pd) if (errno != EINTR) err(2, "pdwait"); } + close(pd); if (WIFEXITED(status) && WEXITSTATUS(status) >= 2) errx(2, "diff exited abnormally"); @@ -1004,7 +1012,7 @@ main(int argc, char **argv) eflag = EFLAG_OVERLAP; break; case DIFFPROG_OPT: - diffprog = optarg; + diffargv[0] = optarg; break; case STRIPCR_OPT: strip_cr = 1; @@ -1074,18 +1082,18 @@ main(int argc, char **argv) if (caph_rights_limit(fileno(fp[2]), &rights_ro) < 0) err(2, "unable to limit rights on: %s", file3); - if (pipe(fd13)) + if (pipe2(fd13, O_CLOEXEC)) err(2, "pipe"); - if (pipe(fd23)) + if (pipe2(fd23, O_CLOEXEC)) err(2, "pipe"); diffargv[diffargc] = file1; diffargv[diffargc + 1] = file3; diffargv[diffargc + 2] = NULL; - pd13 = diffexec(diffprog, diffargv, fd13); + pd13 = diffexec(diffargv, fd13); diffargv[diffargc] = file2; - pd23 = diffexec(diffprog, 
diffargv, fd23); + pd23 = diffexec(diffargv, fd23); caph_cache_catpages(); if (caph_enter() < 0) diff --git a/usr.bin/m4/eval.c b/usr.bin/m4/eval.c index 0963a61a2914..4f088e0415e0 100644 --- a/usr.bin/m4/eval.c +++ b/usr.bin/m4/eval.c @@ -60,17 +60,17 @@ static void dodefn(const char *); static void dopushdef(const char *, const char *); -static void dodump(const char *[], int); +static void dodumpdef(const char *[], int); static void dotrace(const char *[], int, int); static void doifelse(const char *[], int); -static int doincl(const char *); +static int doinclude(const char *); static int dopaste(const char *); -static void dochq(const char *[], int); -static void dochc(const char *[], int); +static void dochangequote(const char *[], int); +static void dochangecom(const char *[], int); static void dom4wrap(const char *); -static void dodiv(int); -static void doundiv(const char *[], int); -static void dosub(const char *[], int); +static void dodivert(int); +static void doundivert(const char *[], int); +static void dosubstr(const char *[], int); static void map(char *, const char *, const char *, const char *); static const char *handledash(char *, char *, const char *); static void expand_builtin(const char *[], int, int); @@ -108,7 +108,7 @@ eval(const char *argv[], int argc, int td, int is_traced) m4errx(1, "expanding recursive definition for %s.", argv[1]); if (is_traced) mark = trace(argv, argc, infile+ilevel); - if (td == MACRTYPE) + if (td == MACROTYPE) expand_macro(argv, argc); else expand_builtin(argv, argc, td); @@ -149,18 +149,18 @@ expand_builtin(const char *argv[], int argc, int td) switch (td & TYPEMASK) { - case DEFITYPE: + case DEFINETYPE: if (argc > 2) dodefine(argv[2], (argc > 3) ? argv[3] : null); break; - case PUSDTYPE: + case PUSHDEFTYPE: if (argc > 2) dopushdef(argv[2], (argc > 3) ? 
argv[3] : null); break; - case DUMPTYPE: - dodump(argv, argc); + case DUMPDEFTYPE: + dodumpdef(argv, argc); break; case TRACEONTYPE: @@ -171,10 +171,9 @@ expand_builtin(const char *argv[], int argc, int td) dotrace(argv, argc, 0); break; - case EXPRTYPE: + case EVALTYPE: /* - * doexpr - evaluate arithmetic - * expression + * doeval - evaluate arithmetic expression */ { int base = 10; @@ -184,14 +183,14 @@ expand_builtin(const char *argv[], int argc, int td) if (argc > 3 && *argv[3] != '\0') { base = strtonum(argv[3], 2, 36, &errstr); if (errstr) { - m4errx(1, "expr: base is %s: %s.", + m4errx(1, "eval: base is %s: %s.", errstr, argv[3]); } } if (argc > 4) { mindigits = strtonum(argv[4], 0, INT_MAX, &errstr); if (errstr) { - m4errx(1, "expr: mindigits is %s: %s.", + m4errx(1, "eval: mindigits is %s: %s.", errstr, argv[4]); } } @@ -200,15 +199,14 @@ expand_builtin(const char *argv[], int argc, int td) break; } - case IFELTYPE: + case IFELSETYPE: doifelse(argv, argc); break; - case IFDFTYPE: + case IFDEFTYPE: /* - * doifdef - select one of two - * alternatives based on the existence of - * another definition + * doifdef - select one of two alternatives based + * on the existence of another definition */ if (argc > 3) { if (lookup_macro_definition(argv[2]) != NULL) @@ -218,18 +216,16 @@ expand_builtin(const char *argv[], int argc, int td) } break; - case LENGTYPE: + case LENTYPE: /* - * dolen - find the length of the - * argument + * dolen - find the length of the argument */ pbnum((argc > 2) ? 
strlen(argv[2]) : 0); break; case INCRTYPE: /* - * doincr - increment the value of the - * argument + * doincr - increment the value of the argument */ if (argc > 2) { n = strtonum(argv[2], INT_MIN, INT_MAX-1, &errstr); @@ -242,8 +238,7 @@ expand_builtin(const char *argv[], int argc, int td) case DECRTYPE: /* - * dodecr - decrement the value of the - * argument + * dodecr - decrement the value of the argument */ if (argc > 2) { n = strtonum(argv[2], INT_MIN+1, INT_MAX, &errstr); @@ -254,9 +249,9 @@ expand_builtin(const char *argv[], int argc, int td) } break; - case SYSCTYPE: + case SYSCMDTYPE: /* - * dosys - execute system command + * dosyscmd - execute system command */ if (argc > 2) { fflush(stdout); @@ -264,10 +259,9 @@ expand_builtin(const char *argv[], int argc, int td) } break; - case SYSVTYPE: + case SYSVALTYPE: /* - * dosysval - return value of the last - * system call. + * dosysval - return value of the last system call. * */ pbnum(sysval); @@ -277,9 +271,9 @@ expand_builtin(const char *argv[], int argc, int td) if (argc > 2) doesyscmd(argv[2]); break; - case INCLTYPE: + case INCLUDETYPE: if (argc > 2) { - if (!doincl(argv[2])) { + if (!doinclude(argv[2])) { if (mimic_gnu) { warn("%s at line %lu: include(%s)", CURRENT_NAME, CURRENT_LINE, argv[2]); @@ -295,19 +289,20 @@ expand_builtin(const char *argv[], int argc, int td) } break; - case SINCTYPE: + case SINCLUDETYPE: + /* like include, but don't error out if file not found */ if (argc > 2) - (void) doincl(argv[2]); + (void) doinclude(argv[2]); break; #ifdef EXTENDED - case PASTTYPE: + case PASTETYPE: if (argc > 2) if (!dopaste(argv[2])) - err(1, "%s at line %lu: paste(%s)", + err(1, "%s at line %lu: paste(%s)", CURRENT_NAME, CURRENT_LINE, argv[2]); break; - case SPASTYPE: + case SPASTETYPE: if (argc > 2) (void) dopaste(argv[2]); break; @@ -315,28 +310,27 @@ expand_builtin(const char *argv[], int argc, int td) doformat(argv, argc); break; #endif - case CHNQTYPE: - dochq(argv, ac); + case CHANGEQUOTETYPE: + 
dochangequote(argv, ac); break; - case CHNCTYPE: - dochc(argv, argc); + case CHANGECOMTYPE: + dochangecom(argv, argc); break; - case SUBSTYPE: + case SUBSTRTYPE: /* - * dosub - select substring + * dosubstr - select substring * */ if (argc > 3) - dosub(argv, argc); + dosubstr(argv, argc); break; - case SHIFTYPE: + case SHIFTTYPE: /* - * doshift - push back all arguments - * except the first one (i.e. skip - * argv[2]) + * doshift - push back all arguments except the first one + * (i.e. skip argv[2]) */ if (argc > 3) { for (n = argc - 1; n > 3; n--) { @@ -351,57 +345,55 @@ expand_builtin(const char *argv[], int argc, int td) } break; - case DIVRTYPE: + case DIVERTTYPE: if (argc > 2) { n = strtonum(argv[2], INT_MIN, INT_MAX, &errstr); if (errstr) m4errx(1, "divert: argument is %s: %s.", errstr, argv[2]); if (n != 0) { - dodiv(n); - break; + dodivert(n); + break; } } active = stdout; oindex = 0; break; - case UNDVTYPE: - doundiv(argv, argc); + case UNDIVERTTYPE: + doundivert(argv, argc); break; - case DIVNTYPE: + case DIVNUMTYPE: /* - * dodivnum - return the number of - * current output diversion + * dodivnum - return the number of current output diversion */ pbnum(oindex); break; - case UNDFTYPE: + case UNDEFINETYPE: /* - * doundefine - undefine a previously - * defined macro(s) or m4 keyword(s). + * doundefine - undefine a previously defined macro(s) or m4 + * keyword(s). */ if (argc > 2) for (n = 2; n < argc; n++) macro_undefine(argv[n]); break; - case POPDTYPE: + case POPDEFTYPE: /* - * dopopdef - remove the topmost - * definitions of macro(s) or m4 - * keyword(s). + * dopopdef - remove the topmost definitions of macro(s) + * or m4 keyword(s). 
*/ if (argc > 2) for (n = 2; n < argc; n++) macro_popdef(argv[n]); break; - case MKTMTYPE: + case MKSTEMPTYPE: /* - * dotemp - create a temporary file + * domkstemp - safely create a temporary file */ if (argc > 2) { int fd; @@ -420,11 +412,10 @@ expand_builtin(const char *argv[], int argc, int td) } break; - case TRNLTYPE: + case TRANSLITTYPE: /* - * dotranslit - replace all characters in - * the source string that appears in the - * "from" string with the corresponding + * dotranslit - replace all characters in the source string + * that appear in the "from" string with the corresponding * characters in the "to" string. */ if (argc > 3) { @@ -441,19 +432,17 @@ expand_builtin(const char *argv[], int argc, int td) pbstr(argv[2]); break; - case INDXTYPE: + case INDEXTYPE: /* - * doindex - find the index of the second - * argument string in the first argument - * string. -1 if not present. + * doindex - find the index of the second argument string + * in the first argument string. -1 if not present. */ - pbnum((argc > 3) ? indx(argv[2], argv[3]) : -1); + pbnum((argc > 3) ? doindex(argv[2], argv[3]) : -1); break; - case ERRPTYPE: + case ERRPRINTTYPE: /* - * doerrp - print the arguments to stderr - * file + * doerrprint - print the arguments to stderr */ if (argc > 2) { for (n = 2; n < argc; n++) @@ -462,16 +451,15 @@ expand_builtin(const char *argv[], int argc, int td) } break; - case DNLNTYPE: + case DNLTYPE: /* - * dodnl - eat-up-to and including - * newline + * dodnl - eat-up-to and including newline */ while ((c = gpbc()) != '\n' && c != EOF) ; break; - case M4WRTYPE: + case M4WRAPTYPE: /* * dom4wrap - set up for * wrap-up/wind-down activity @@ -480,9 +468,9 @@ expand_builtin(const char *argv[], int argc, int td) dom4wrap(argv[2]); break; - case EXITTYPE: + case M4EXITTYPE: /* - * doexit - immediate exit from m4. + * dom4exit - immediate exit from m4. */ killdiv(); exit((argc > 2) ? 
atoi(argv[2]) : 0); @@ -504,7 +492,7 @@ expand_builtin(const char *argv[], int argc, int td) dobuiltin(argv, argc); break; - case PATSTYPE: + case PATSUBSTTYPE: if (argc > 2) dopatsubst(argv, argc); break; @@ -625,7 +613,7 @@ dodefn(const char *name) struct macro_definition *p; if ((p = lookup_macro_definition(name)) != NULL) { - if ((p->type & TYPEMASK) == MACRTYPE) { + if ((p->type & TYPEMASK) == MACROTYPE) { pbstr(rquote); pbstr(p->defn); pbstr(lquote); @@ -661,7 +649,7 @@ dump_one_def(const char *name, struct macro_definition *p) if (!traceout) traceout = stderr; if (mimic_gnu) { - if ((p->type & TYPEMASK) == MACRTYPE) + if ((p->type & TYPEMASK) == MACROTYPE) fprintf(traceout, "%s:\t%s\n", name, p->defn); else { fprintf(traceout, "%s:\t<%s>\n", name, p->defn); @@ -676,7 +664,7 @@ dump_one_def(const char *name, struct macro_definition *p) * hash table is dumped. */ static void -dodump(const char *argv[], int argc) +dodumpdef(const char *argv[], int argc) { int n; struct macro_definition *p; @@ -728,7 +716,7 @@ doifelse(const char *argv[], int argc) * doinclude - include a given file. */ static int -doincl(const char *ifile) +doinclude(const char *ifile) { if (ilevel + 1 == MAXINP) m4errx(1, "too many include files."); @@ -765,10 +753,10 @@ dopaste(const char *pfile) #endif /* - * dochq - change quote characters + * dochangequote - change quote characters */ static void -dochq(const char *argv[], int ac) +dochangequote(const char *argv[], int ac) { if (ac == 2) { lquote[0] = LQUOTE; lquote[1] = EOS; @@ -784,10 +772,10 @@ dochq(const char *argv[], int ac) } /* - * dochc - change comment characters + * dochangecom - change comment characters */ static void -dochc(const char *argv[], int argc) +dochangecom(const char *argv[], int argc) { /* XXX Note that there is no difference between no argument and a single * empty argument. 
@@ -826,7 +814,7 @@ dom4wrap(const char *text) * dodivert - divert the output to a temporary file */ static void -dodiv(int n) +dodivert(int n) { int fd; @@ -856,7 +844,7 @@ dodiv(int n) * other outputs, in numerical order. */ static void -doundiv(const char *argv[], int argc) +doundivert(const char *argv[], int argc) { int ind; int n; @@ -881,10 +869,10 @@ doundiv(const char *argv[], int argc) } /* - * dosub - select substring + * dosubstr - select substring */ static void -dosub(const char *argv[], int argc) +dosubstr(const char *argv[], int argc) { const char *ap, *fc, *k; int nc; @@ -912,11 +900,11 @@ dosub(const char *argv[], int argc) * map every character of s1 that is specified in from * into s3 and replace in s. (source s1 remains untouched) * - * This is derived from the a standard implementation of map(s,from,to) - * function of ICON language. Within mapvec, we replace every character - * of "from" with the corresponding character in "to". - * If "to" is shorter than "from", than the corresponding entries are null, - * which means that those characters disappear altogether. + * This is derived from the a standard implementation of map(s,from,to) + * function of ICON language. Within mapvec, we replace every character + * of "from" with the corresponding character in "to". + * If "to" is shorter than "from", than the corresponding entries are null, + * which means that those characters disappear altogether. 
*/ static void map(char *dest, const char *src, const char *from, const char *to) @@ -1006,7 +994,7 @@ handledash(char *buffer, char *end, const char *src) if (src[1] == '-' && src[2]) { unsigned char i; if ((unsigned char)src[0] <= (unsigned char)src[2]) { - for (i = (unsigned char)src[0]; + for (i = (unsigned char)src[0]; i <= (unsigned char)src[2]; i++) { *p++ = i; if (p == end) { @@ -1015,7 +1003,7 @@ handledash(char *buffer, char *end, const char *src) } } } else { - for (i = (unsigned char)src[0]; + for (i = (unsigned char)src[0]; i >= (unsigned char)src[2]; i--) { *p++ = i; if (p == end) { diff --git a/usr.bin/m4/expr.c b/usr.bin/m4/expr.c index 7910403d74f0..859be1557d83 100644 --- a/usr.bin/m4/expr.c +++ b/usr.bin/m4/expr.c @@ -32,7 +32,7 @@ int yyerror(const char *msg) { fprintf(stderr, "m4: %s in expr %s\n", msg, copy_toeval); - return(0); + return 0; } int diff --git a/usr.bin/m4/extern.h b/usr.bin/m4/extern.h index 94eb66314bf4..a411cb6d5891 100644 --- a/usr.bin/m4/extern.h +++ b/usr.bin/m4/extern.h @@ -95,7 +95,7 @@ extern int exit_code; extern void chrsave(int); extern char *compute_prevep(void); extern void getdiv(int); -extern ptrdiff_t indx(const char *, const char *); +extern ptrdiff_t doindex(const char *, const char *); extern void initspaces(void); extern void killdiv(void); extern void onintr(int); diff --git a/usr.bin/m4/look.c b/usr.bin/m4/look.c index d7ad94b18bd0..c1fccd7dd8e5 100644 --- a/usr.bin/m4/look.c +++ b/usr.bin/m4/look.c @@ -127,7 +127,7 @@ setup_definition(struct macro_definition *d, const char *defn, const char *name) d->defn = __DECONST(char *, null); else d->defn = xstrdup(defn); - d->type = MACRTYPE; + d->type = MACROTYPE; } if (STREQ(name, defn)) d->type |= RECDEF; @@ -137,8 +137,8 @@ static ndptr create_entry(const char *name) { const char *end = NULL; - unsigned int i; ndptr n; + unsigned int i; i = ohash_qlookupi(¯os, name, &end); n = ohash_find(¯os, i); @@ -146,7 +146,7 @@ create_entry(const char *name) n = 
ohash_create_entry(¯o_info, name, &end); ohash_insert(¯os, i, n); n->trace_flags = FLAG_NO_TRACE; - n->builtin_type = MACRTYPE; + n->builtin_type = MACROTYPE; n->d = NULL; } return n; @@ -156,6 +156,7 @@ void macro_define(const char *name, const char *defn) { ndptr n = create_entry(name); + if (n->d != NULL) { if (n->d->defn != null) free_definition(n->d->defn); @@ -183,6 +184,7 @@ void macro_undefine(const char *name) { ndptr n = lookup(name); + if (n != NULL) { struct macro_definition *r, *r2; @@ -271,7 +273,7 @@ macro_getbuiltin(const char *name) ndptr p; p = lookup(name); - if (p == NULL || p->builtin_type == MACRTYPE) + if (p == NULL || p->builtin_type == MACROTYPE) return NULL; else return p; @@ -295,21 +297,21 @@ keep(char *ptr) kept_capacity *= 2; else kept_capacity = 50; - kept = xreallocarray(kept, kept_capacity, - sizeof(char *), "Out of memory while saving %d strings\n", + kept = xreallocarray(kept, kept_capacity, + sizeof(char *), "Out of memory while saving %d strings\n", kept_capacity); } kept[kept_size++] = ptr; } static int -string_in_use(const char *ptr) +string_in_use(const char *ptr) { int i; - for (i = 0; i <= sp; i++) { + + for (i = 0; i <= sp; i++) if (sstack[i] == STORAGE_MACRO && mstack[i].sstr == ptr) return 1; - } return 0; } @@ -324,7 +326,7 @@ free_definition(char *ptr) if (!string_in_use(kept[i])) { kept_size--; free(kept[i]); - if (i != kept_size) + if (i != kept_size) kept[i] = kept[kept_size]; i--; } @@ -336,4 +338,3 @@ free_definition(char *ptr) else free(ptr); } - diff --git a/usr.bin/m4/main.c b/usr.bin/m4/main.c index d68069cb10a8..5daae7fd22c2 100644 --- a/usr.bin/m4/main.c +++ b/usr.bin/m4/main.c @@ -86,7 +86,7 @@ int maxout; FILE *active; /* active output file pointer */ int ilevel = 0; /* input file stack pointer */ int oindex = 0; /* diversion index.. */ -const char *null = ""; /* as it says.. just a null.. */ +const char *null = ""; /* as it says.. just a null.. */ char **m4wraps = NULL; /* m4wraps array. 
*/ int maxwraps = 0; /* size of m4wraps array */ int wrapindex = 0; /* current offset in m4wraps */ @@ -105,53 +105,54 @@ struct keyblk { }; static struct keyblk keywrds[] = { /* m4 keywords to be installed */ - { "include", INCLTYPE }, - { "sinclude", SINCTYPE }, - { "define", DEFITYPE }, + { "include", INCLUDETYPE }, + { "sinclude", SINCLUDETYPE }, + { "define", DEFINETYPE }, { "defn", DEFNTYPE }, - { "divert", DIVRTYPE | NOARGS }, - { "expr", EXPRTYPE }, - { "eval", EXPRTYPE }, - { "substr", SUBSTYPE }, - { "ifelse", IFELTYPE }, - { "ifdef", IFDFTYPE }, - { "len", LENGTYPE }, + { "divert", DIVERTTYPE | NOARGS }, + { "eval", EVALTYPE }, + { "expr", EVALTYPE }, + { "substr", SUBSTRTYPE }, + { "ifelse", IFELSETYPE }, + { "ifdef", IFDEFTYPE }, + { "len", LENTYPE }, { "incr", INCRTYPE }, { "decr", DECRTYPE }, - { "dnl", DNLNTYPE | NOARGS }, - { "changequote", CHNQTYPE | NOARGS }, - { "changecom", CHNCTYPE | NOARGS }, - { "index", INDXTYPE }, + { "dnl", DNLTYPE | NOARGS }, + { "changequote", CHANGEQUOTETYPE | NOARGS }, + { "changecom", CHANGECOMTYPE | NOARGS }, + { "index", INDEXTYPE }, #ifdef EXTENDED - { "paste", PASTTYPE }, - { "spaste", SPASTYPE }, + { "paste", PASTETYPE }, + { "spaste", SPASTETYPE }, /* Newer extensions, needed to handle gnu-m4 scripts */ { "indir", INDIRTYPE}, { "builtin", BUILTINTYPE}, - { "patsubst", PATSTYPE}, + { "patsubst", PATSUBSTTYPE}, { "regexp", REGEXPTYPE}, { "esyscmd", ESYSCMDTYPE}, { "__file__", FILENAMETYPE | NOARGS}, { "__line__", LINETYPE | NOARGS}, #endif - { "popdef", POPDTYPE }, - { "pushdef", PUSDTYPE }, - { "dumpdef", DUMPTYPE | NOARGS }, - { "shift", SHIFTYPE | NOARGS }, - { "translit", TRNLTYPE }, - { "undefine", UNDFTYPE }, - { "undivert", UNDVTYPE | NOARGS }, - { "divnum", DIVNTYPE | NOARGS }, - { "maketemp", MKTMTYPE }, - { "mkstemp", MKTMTYPE }, - { "errprint", ERRPTYPE | NOARGS }, - { "m4wrap", M4WRTYPE | NOARGS }, - { "m4exit", EXITTYPE | NOARGS }, - { "syscmd", SYSCTYPE }, - { "sysval", SYSVTYPE | NOARGS }, + { 
"popdef", POPDEFTYPE }, + { "pushdef", PUSHDEFTYPE }, + { "dumpdef", DUMPDEFTYPE | NOARGS }, + { "shift", SHIFTTYPE | NOARGS }, + { "translit", TRANSLITTYPE }, + { "undefine", UNDEFINETYPE }, + { "undivert", UNDIVERTTYPE | NOARGS }, + { "divnum", DIVNUMTYPE | NOARGS }, + { "maketemp", MKSTEMPTYPE }, + { "mkstemp", MKSTEMPTYPE }, + { "errprint", ERRPRINTTYPE | NOARGS }, + { "m4wrap", M4WRAPTYPE | NOARGS }, + { "m4exit", M4EXITTYPE | NOARGS }, + { "syscmd", SYSCMDTYPE }, + { "sysval", SYSVALTYPE | NOARGS }, { "traceon", TRACEONTYPE | NOARGS }, { "traceoff", TRACEOFFTYPE | NOARGS }, +/* Macro that expands to itself, signature of the current OS */ { "unix", SELFTYPE | NOARGS }, }; @@ -387,8 +388,7 @@ macro(void) CHRSAVE(l); } } - } - while (nlpar != 0); + } while (nlpar != 0); } else if (sp < 0 && LOOK_AHEAD(t, scommt)) { reallyoutputstr(scommt); diff --git a/usr.bin/m4/mdef.h b/usr.bin/m4/mdef.h index d4fa5b0e0c14..259d4d2baa55 100644 --- a/usr.bin/m4/mdef.h +++ b/usr.bin/m4/mdef.h @@ -41,50 +41,50 @@ # define UNUSED #endif -#define MACRTYPE 1 -#define DEFITYPE 2 -#define EXPRTYPE 3 -#define SUBSTYPE 4 -#define IFELTYPE 5 -#define LENGTYPE 6 -#define CHNQTYPE 7 -#define SYSCTYPE 8 -#define UNDFTYPE 9 -#define INCLTYPE 10 -#define SINCTYPE 11 -#define PASTTYPE 12 -#define SPASTYPE 13 -#define INCRTYPE 14 -#define IFDFTYPE 15 -#define PUSDTYPE 16 -#define POPDTYPE 17 -#define SHIFTYPE 18 -#define DECRTYPE 19 -#define DIVRTYPE 20 -#define UNDVTYPE 21 -#define DIVNTYPE 22 -#define MKTMTYPE 23 -#define ERRPTYPE 24 -#define M4WRTYPE 25 -#define TRNLTYPE 26 -#define DNLNTYPE 27 -#define DUMPTYPE 28 -#define CHNCTYPE 29 -#define INDXTYPE 30 -#define SYSVTYPE 31 -#define EXITTYPE 32 -#define DEFNTYPE 33 -#define SELFTYPE 34 -#define INDIRTYPE 35 -#define BUILTINTYPE 36 -#define PATSTYPE 37 -#define FILENAMETYPE 38 -#define LINETYPE 39 -#define REGEXPTYPE 40 -#define ESYSCMDTYPE 41 -#define TRACEONTYPE 42 -#define TRACEOFFTYPE 43 -#define FORMATTYPE 44 +#define MACROTYPE 1 
+#define DEFINETYPE 2 +#define EVALTYPE 3 +#define SUBSTRTYPE 4 +#define IFELSETYPE 5 +#define LENTYPE 6 +#define CHANGEQUOTETYPE 7 +#define SYSCMDTYPE 8 +#define UNDEFINETYPE 9 +#define INCLUDETYPE 10 +#define SINCLUDETYPE 11 +#define PASTETYPE 12 +#define SPASTETYPE 13 +#define INCRTYPE 14 +#define IFDEFTYPE 15 +#define PUSHDEFTYPE 16 +#define POPDEFTYPE 17 +#define SHIFTTYPE 18 +#define DECRTYPE 19 +#define DIVERTTYPE 20 +#define UNDIVERTTYPE 21 +#define DIVNUMTYPE 22 +#define MKSTEMPTYPE 23 +#define ERRPRINTTYPE 24 +#define M4WRAPTYPE 25 +#define TRANSLITTYPE 26 +#define DNLTYPE 27 +#define DUMPDEFTYPE 28 +#define CHANGECOMTYPE 29 +#define INDEXTYPE 30 +#define SYSVALTYPE 31 +#define M4EXITTYPE 32 +#define DEFNTYPE 33 +#define SELFTYPE 34 +#define INDIRTYPE 35 +#define BUILTINTYPE 36 +#define PATSUBSTTYPE 37 +#define FILENAMETYPE 38 +#define LINETYPE 39 +#define REGEXPTYPE 40 +#define ESYSCMDTYPE 41 +#define TRACEONTYPE 42 +#define TRACEOFFTYPE 43 +#define FORMATTYPE 44 #define BUILTIN_MARKER "__builtin_" @@ -204,7 +204,7 @@ struct input_file { mstack[sp].sstr = macro_getdef(p)->defn;\ sstack[sp] = STORAGE_MACRO; \ } while (0) - + /* * . . diff --git a/usr.bin/m4/misc.c b/usr.bin/m4/misc.c index fd72292aeac0..24e91c572f4d 100644 --- a/usr.bin/m4/misc.c +++ b/usr.bin/m4/misc.c @@ -68,7 +68,7 @@ unsigned char *endpbb; /* end of push-back buffer */ * find the index of second str in the first str. 
*/ ptrdiff_t -indx(const char *s1, const char *s2) +doindex(const char *s1, const char *s2) { char *t; @@ -135,8 +135,7 @@ pbnumbase(int n, int base, int d) do { pushback(digits[num % base]); printed++; - } - while ((num /= base) > 0); + } while ((num /= base) > 0); while (printed++ < d) pushback('0'); @@ -153,8 +152,7 @@ pbunsigned(unsigned long n) { do { pushback(n % 10 + '0'); - } - while ((n /= 10) > 0); + } while ((n /= 10) > 0); } void @@ -186,9 +184,9 @@ enlarge_strspace(void) memcpy(newstrspace, strspace, strsize/2); for (i = 0; i <= sp; i++) if (sstack[i] == STORAGE_STRSPACE) - mstack[i].sstr = (mstack[i].sstr - strspace) - + newstrspace; - ep = (ep-strspace) + newstrspace; + mstack[i].sstr = (mstack[i].sstr - strspace) + + newstrspace; + ep = (ep - strspace) + newstrspace; free(strspace); strspace = newstrspace; endest = strspace + strsize; diff --git a/usr.bin/mdo/mdo.1 b/usr.bin/mdo/mdo.1 index 1de694c69e40..c1a0295535df 100644 --- a/usr.bin/mdo/mdo.1 +++ b/usr.bin/mdo/mdo.1 @@ -9,7 +9,7 @@ .\" <olce@FreeBSD.org> at Kumacom SARL under sponsorship from the FreeBSD .\" Foundation. .\" -.Dd November 26, 2025 +.Dd March 22, 2026 .Dt MDO 1 .Os .Sh NAME @@ -232,7 +232,8 @@ mdo -k --euid root --svuid root id .Sh SEE ALSO .Xr su 1 , .Xr setcred 2 , -.Xr mac_do 4 +.Xr mac_do 4 , +.Xr security 7 .Sh HISTORY The .Nm diff --git a/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.8 b/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.8 index ac32a675aa63..fd0118655a67 100644 --- a/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.8 +++ b/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.8 @@ -48,8 +48,8 @@ device. .Pp This utility will .Em only -work with Intel Wireless 7260/8260/9260 chip based Bluetooth USB devices -and some of their successors. +work with Intel Wireless 7260/8260/9260 and newer chip based Bluetooth +USB devices, including AX and BE series wireless adapters. The identification is currently based on USB vendor ID/product ID pair. 
The vendor ID should be 0x8087 .Pq Dv USB_VENDOR_INTEL2 diff --git a/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.conf b/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.conf index e30a3c15ccaa..d48206827f90 100644 --- a/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.conf +++ b/usr.sbin/bluetooth/iwmbtfw/iwmbtfw.conf @@ -7,6 +7,6 @@ notify 100 { match "subsystem" "DEVICE"; match "type" "ATTACH"; match "vendor" "0x8087"; - match "product" "(0x07dc|0x0a2a|0x0aa7|0x0a2b|0x0aaa|0x0025|0x0026|0x0029|0x0032|0x0033)"; + match "product" "(0x07dc|0x0a2a|0x0aa7|0x0a2b|0x0aaa|0x0025|0x0026|0x0029|0x0032|0x0033|0x0035|0x0036)"; action "/usr/sbin/iwmbtfw -d $cdev -f /usr/local/share/iwmbt-firmware"; }; diff --git a/usr.sbin/bluetooth/iwmbtfw/main.c b/usr.sbin/bluetooth/iwmbtfw/main.c index b27c5ad62239..1e11cc468015 100644 --- a/usr.sbin/bluetooth/iwmbtfw/main.c +++ b/usr.sbin/bluetooth/iwmbtfw/main.c @@ -81,6 +81,8 @@ static struct iwmbt_devid iwmbt_list[] = { /* Intel Wireless 9260/9560 and successors */ { .vendor_id = 0x8087, .product_id = 0x0032, .device = IWMBT_DEVICE_9260 }, { .vendor_id = 0x8087, .product_id = 0x0033, .device = IWMBT_DEVICE_9260 }, + { .vendor_id = 0x8087, .product_id = 0x0035, .device = IWMBT_DEVICE_9260 }, + { .vendor_id = 0x8087, .product_id = 0x0036, .device = IWMBT_DEVICE_9260 }, }; static enum iwmbt_device diff --git a/usr.sbin/bluetooth/rtlbtfw/main.c b/usr.sbin/bluetooth/rtlbtfw/main.c index 37c902739206..dc9ccd6c5fcd 100644 --- a/usr.sbin/bluetooth/rtlbtfw/main.c +++ b/usr.sbin/bluetooth/rtlbtfw/main.c @@ -57,21 +57,19 @@ struct rtlbt_devid { }; static struct rtlbt_devid rtlbt_list[] = { + /* + * Non-Realtek vendors using Realtek Bluetooth chipsets. + * Devices with vendor 0x0bda are already matched by the + * generic check in rtlbt_find_device(). 
+ */ + /* Realtek 8821CE Bluetooth devices */ { .vendor_id = 0x13d3, .product_id = 0x3529 }, - /* Realtek 8822CE Bluetooth devices */ - { .vendor_id = 0x0bda, .product_id = 0xb00c }, - { .vendor_id = 0x0bda, .product_id = 0xc822 }, - /* Realtek 8851BE Bluetooth devices */ { .vendor_id = 0x13d3, .product_id = 0x3600 }, /* Realtek 8852AE Bluetooth devices */ - { .vendor_id = 0x0bda, .product_id = 0x2852 }, - { .vendor_id = 0x0bda, .product_id = 0xc852 }, - { .vendor_id = 0x0bda, .product_id = 0x385a }, - { .vendor_id = 0x0bda, .product_id = 0x4852 }, { .vendor_id = 0x04c5, .product_id = 0x165c }, { .vendor_id = 0x04ca, .product_id = 0x4006 }, { .vendor_id = 0x0cb8, .product_id = 0xc549 }, @@ -88,9 +86,6 @@ static struct rtlbt_devid rtlbt_list[] = { /* Realtek 8852BE Bluetooth devices */ { .vendor_id = 0x0cb8, .product_id = 0xc559 }, - { .vendor_id = 0x0bda, .product_id = 0x4853 }, - { .vendor_id = 0x0bda, .product_id = 0x887b }, - { .vendor_id = 0x0bda, .product_id = 0xb85b }, { .vendor_id = 0x13d3, .product_id = 0x3570 }, { .vendor_id = 0x13d3, .product_id = 0x3571 }, { .vendor_id = 0x13d3, .product_id = 0x3572 }, @@ -98,11 +93,7 @@ static struct rtlbt_devid rtlbt_list[] = { { .vendor_id = 0x0489, .product_id = 0xe123 }, { .vendor_id = 0x0489, .product_id = 0xe125 }, - /* Realtek 8852BT/8852BE-VT Bluetooth devices */ - { .vendor_id = 0x0bda, .product_id = 0x8520 }, - /* Realtek 8922AE Bluetooth devices */ - { .vendor_id = 0x0bda, .product_id = 0x8922 }, { .vendor_id = 0x13d3, .product_id = 0x3617 }, { .vendor_id = 0x13d3, .product_id = 0x3616 }, { .vendor_id = 0x0489, .product_id = 0xe130 }, @@ -124,7 +115,6 @@ static struct rtlbt_devid rtlbt_list[] = { { .vendor_id = 0x7392, .product_id = 0xa611 }, /* Realtek 8723DE Bluetooth devices */ - { .vendor_id = 0x0bda, .product_id = 0xb009 }, { .vendor_id = 0x2ff8, .product_id = 0xb011 }, /* Realtek 8761BUV Bluetooth devices */ @@ -132,7 +122,6 @@ static struct rtlbt_devid rtlbt_list[] = { { .vendor_id = 0x2357, 
.product_id = 0x0604 }, { .vendor_id = 0x0b05, .product_id = 0x190e }, { .vendor_id = 0x2550, .product_id = 0x8761 }, - { .vendor_id = 0x0bda, .product_id = 0x8771 }, { .vendor_id = 0x6655, .product_id = 0x8771 }, { .vendor_id = 0x7392, .product_id = 0xc611 }, { .vendor_id = 0x2b89, .product_id = 0x8761 }, @@ -158,7 +147,6 @@ static struct rtlbt_devid rtlbt_list[] = { { .vendor_id = 0x13d3, .product_id = 0x3555 }, { .vendor_id = 0x2ff8, .product_id = 0x3051 }, { .vendor_id = 0x1358, .product_id = 0xc123 }, - { .vendor_id = 0x0bda, .product_id = 0xc123 }, { .vendor_id = 0x0cb5, .product_id = 0xc547 }, }; diff --git a/usr.sbin/bluetooth/rtlbtfw/rtlbtfw.conf b/usr.sbin/bluetooth/rtlbtfw/rtlbtfw.conf index 0a2b33d33b18..f27e0ee50ccc 100644 --- a/usr.sbin/bluetooth/rtlbtfw/rtlbtfw.conf +++ b/usr.sbin/bluetooth/rtlbtfw/rtlbtfw.conf @@ -26,16 +26,6 @@ notify 100 { action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; }; -# Realtek 8822CE Bluetooth devices -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "(0xb00c|0xc822)"; - action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; -}; - # Realtek 8851BE Bluetooth devices notify 100 { match "system" "USB"; @@ -51,14 +41,6 @@ notify 100 { match "system" "USB"; match "subsystem" "DEVICE"; match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "(0x2852|0xc852|0x385a|0x4852)"; - action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; -}; -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; match "vendor" "0x04c5"; match "product" "0x165c"; action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; @@ -135,14 +117,6 @@ notify 100 { match "system" "USB"; match "subsystem" "DEVICE"; match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "(0x4853|0x887b|0xb85b)"; - action "/usr/sbin/rtlbtfw -d $cdev -f 
/usr/local/share/rtlbt-firmware"; -}; -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; match "vendor" "0x13d3"; match "product" "(0x3570|0x3571|0x3572|0x3591)"; action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; @@ -156,29 +130,11 @@ notify 100 { action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; }; -# Realtek 8852BT/8852BE-VT Bluetooth devices -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "0x8520"; - action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; -}; - # Realtek 8922AE Bluetooth devices notify 100 { match "system" "USB"; match "subsystem" "DEVICE"; match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "0x8922"; - action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; -}; -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; match "vendor" "0x13d3"; match "product" "(0x3617|0x3616)"; action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; @@ -251,14 +207,6 @@ notify 100 { match "system" "USB"; match "subsystem" "DEVICE"; match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "0xb009"; - action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; -}; -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; match "vendor" "0x2ff8"; match "product" "0xb011"; action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; @@ -301,14 +249,6 @@ notify 100 { match "system" "USB"; match "subsystem" "DEVICE"; match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "0x8771"; - action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; -}; -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; match "vendor" "0x6655"; match "product" "0x8771"; action 
"/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; @@ -419,14 +359,6 @@ notify 100 { match "system" "USB"; match "subsystem" "DEVICE"; match "type" "ATTACH"; - match "vendor" "0x0bda"; - match "product" "0xc123"; - action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; -}; -notify 100 { - match "system" "USB"; - match "subsystem" "DEVICE"; - match "type" "ATTACH"; match "vendor" "0x0cb5"; match "product" "0xc547"; action "/usr/sbin/rtlbtfw -d $cdev -f /usr/local/share/rtlbt-firmware"; diff --git a/usr.sbin/newsyslog/newsyslog.8 b/usr.sbin/newsyslog/newsyslog.8 index aa89ef4b779a..eb927f7e0a68 100644 --- a/usr.sbin/newsyslog/newsyslog.8 +++ b/usr.sbin/newsyslog/newsyslog.8 @@ -14,12 +14,12 @@ .\" the suitability of this software for any purpose. It is .\" provided "as is" without express or implied warranty. .\" -.Dd September 22, 2025 +.Dd March 8, 2026 .Dt NEWSYSLOG 8 .Os .Sh NAME .Nm newsyslog -.Nd maintain system log files to manageable sizes +.Nd rotate system message log files to maintain manageable sizes .Sh SYNOPSIS .Nm .Op Fl CFNPnrsv diff --git a/usr.sbin/newsyslog/newsyslog.c b/usr.sbin/newsyslog/newsyslog.c index 084aeb36b052..fc8ba7df79af 100644 --- a/usr.sbin/newsyslog/newsyslog.c +++ b/usr.sbin/newsyslog/newsyslog.c @@ -1955,9 +1955,9 @@ do_rotate(const struct conf_entry *ent) if (noaction) printf("\tmv %s %s\n", zfile1, zfile2); - else { - /* XXX - Ought to be checking for failure! */ - (void)rename(zfile1, zfile2); + else if (rename(zfile1, zfile2) != 0) { + warn("can't mv %s to %s", zfile1, zfile2); + continue; } change_attrs(zfile2, ent); if (ent->compress && strlen(logfile_suffix) == 0) { diff --git a/usr.sbin/newsyslog/newsyslog.conf.5 b/usr.sbin/newsyslog/newsyslog.conf.5 index b0c1e78eb085..1683a1018f9e 100644 --- a/usr.sbin/newsyslog/newsyslog.conf.5 +++ b/usr.sbin/newsyslog/newsyslog.conf.5 @@ -18,14 +18,12 @@ .\" the suitability of this software for any purpose. 
It is .\" provided "as is" without express or implied warranty. .\" -.Dd February 4, 2026 +.Dd March 8, 2026 .Dt NEWSYSLOG.CONF 5 .Os .Sh NAME .Nm newsyslog.conf -.Nd -.Xr newsyslog 8 -configuration file +.Nd system message log rotator configuration file .Sh DESCRIPTION The .Nm diff --git a/usr.sbin/syslogd/syslog.conf.5 b/usr.sbin/syslogd/syslog.conf.5 index 691f2cdd7062..f641aedee3e5 100644 --- a/usr.sbin/syslogd/syslog.conf.5 +++ b/usr.sbin/syslogd/syslog.conf.5 @@ -25,14 +25,12 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd December 10, 2020 +.Dd March 8, 2026 .Dt SYSLOG.CONF 5 .Os .Sh NAME .Nm syslog.conf -.Nd -.Xr syslogd 8 -configuration file +.Nd syslogd system message log configuration file .Sh DESCRIPTION The .Nm diff --git a/usr.sbin/syslogd/syslogd.c b/usr.sbin/syslogd/syslogd.c index 1b894ae54fc6..59cb56fd5970 100644 --- a/usr.sbin/syslogd/syslogd.c +++ b/usr.sbin/syslogd/syslogd.c @@ -781,15 +781,21 @@ main(int argc, char *argv[]) case EVFILT_SIGNAL: switch (ev.ident) { case SIGHUP: + /* Reload */ init(true); break; case SIGINT: case SIGQUIT: + /* Ignore these unless -F and / or -d */ + if (!Foreground && !Debug) + break; + /* FALLTHROUGH */ case SIGTERM: - if (ev.ident == SIGTERM || Debug) - die(ev.ident); + /* Terminate */ + die(ev.ident); break; case SIGALRM: + /* Mark and flush */ markit(); break; } @@ -2814,7 +2820,7 @@ prop_filter_compile(const char *cfilter) pfilter.cmp_type = FILT_CMP_REGEX; else if (strcasecmp(argv[1], "ereregex") == 0) { pfilter.cmp_type = FILT_CMP_REGEX; - pfilter.cmp_flags |= REG_EXTENDED; + pfilter.cmp_flags |= FILT_FLAG_EXTENDED; } else { dprintf("unknown cmp function"); goto error; diff --git a/usr.sbin/syslogd/tests/syslogd_test.sh b/usr.sbin/syslogd/tests/syslogd_test.sh index 1f235c476c49..253a26258959 100644 --- a/usr.sbin/syslogd/tests/syslogd_test.sh +++ b/usr.sbin/syslogd/tests/syslogd_test.sh @@ -238,6 +238,28 @@ prop_filter_body() 
syslogd_check_log_nomatch "prop1: FreeBSD" syslogd_check_log_nomatch "prop2: freebsd" syslogd_check_log "prop3: Solaris" + + printf ":msg,ereregex,\"substring1|substring2\"\nuser.debug\t${SYSLOGD_LOGFILE}\n" \ + > "${SYSLOGD_CONFIG}" + syslogd_reload + + syslogd_log -p user.debug -t "prop1" -h "${SYSLOGD_LOCAL_SOCKET}" "substring1" + syslogd_check_log "prop1: substring1" + syslogd_log -p user.debug -t "prop2" -h "${SYSLOGD_LOCAL_SOCKET}" "substring2" + syslogd_check_log "prop2: substring2" + syslogd_log -p user.debug -t "prop3" -h "${SYSLOGD_LOCAL_SOCKET}" "substring3" + syslogd_check_log_nomatch "prop3: substring3" + + printf ":msg,!ereregex,\"substring1|substring2\"\nuser.debug\t${SYSLOGD_LOGFILE}\n" \ + > "${SYSLOGD_CONFIG}" + syslogd_reload + + syslogd_log -p user.debug -t "prop1" -h "${SYSLOGD_LOCAL_SOCKET}" "substring1" + syslogd_check_log_nomatch "prop1: substring1" + syslogd_log -p user.debug -t "prop2" -h "${SYSLOGD_LOCAL_SOCKET}" "substring2" + syslogd_check_log_nomatch "prop2: substring2" + syslogd_log -p user.debug -t "prop3" -h "${SYSLOGD_LOCAL_SOCKET}" "substring3" + syslogd_check_log "prop3: substring3" } prop_filter_cleanup() { |
